diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 888dec4d5a..99d65d069c 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -28,15 +28,15 @@ inputs: options: - miden-node - miden-remote-prover - service: + package: required: true - description: The service to build the packages for. + description: The Debian package name. type: choice options: - miden-node - miden-prover - miden-prover-proxy - package: + packaging_dir: required: true description: Name of packaging directory. type: choice @@ -78,7 +78,7 @@ runs: - name: Create package directories shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} mkdir -p \ packaging/deb/$pkg/DEBIAN \ packaging/deb/$pkg/usr/bin \ @@ -89,15 +89,18 @@ runs: - name: Copy package install scripts shell: bash run: | - svc=${{ inputs.service }} pkg=${{ inputs.package }} + pkg_dir=${{ inputs.packaging_dir }} crate=${{ inputs.crate_dir }} - git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$svc/lib/systemd/system/$svc.env - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/$svc.service > packaging/deb/$svc/lib/systemd/system/$svc.service - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postinst > packaging/deb/$svc/DEBIAN/postinst - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postrm > packaging/deb/$svc/DEBIAN/postrm - chmod 0775 packaging/deb/$svc/DEBIAN/postinst - chmod 0775 packaging/deb/$svc/DEBIAN/postrm + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postinst > packaging/deb/$pkg/DEBIAN/postinst + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postrm > packaging/deb/$pkg/DEBIAN/postrm + for service_file in $(ls packaging/$pkg_dir/*.service | sed "s/.*miden/miden/g"); do + svc=$(echo $service_file | sed "s/.service//g") + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/$service_file > packaging/deb/$pkg/lib/systemd/system/$service_file + git 
show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$pkg/lib/systemd/system/$svc.env + done + chmod 0775 packaging/deb/$pkg/DEBIAN/postinst + chmod 0775 packaging/deb/$pkg/DEBIAN/postrm - name: Create control files shell: bash @@ -108,7 +111,7 @@ runs: # Control file's version field must be x.y.z format so strip the rest. version=$(git describe --tags --abbrev=0 | sed 's/[^0-9.]//g' ) - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} cat > packaging/deb/$pkg/DEBIAN/control << EOF Package: $pkg Version: $version @@ -118,8 +121,8 @@ runs: Maintainer: Miden Description: $pkg binary package Homepage: https://miden.xyz - Vcs-Git: git@github.com:0xMiden/miden-node.git - Vcs-Browser: https://github.com/0xMiden/miden-node + Vcs-Git: git@github.com:0xMiden/node.git + Vcs-Browser: https://github.com/0xMiden/node EOF - name: Build binaries @@ -132,14 +135,14 @@ runs: - name: Copy binary files shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} bin=${{ inputs.crate }} cp -p ./bin/$bin packaging/deb/$pkg/usr/bin/ - name: Build packages shell: bash run: | - dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.service }} + dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.package }} # Save the .deb files, delete the rest. mv packaging/deb/*.deb . 
@@ -148,12 +151,12 @@ runs: - name: Package names shell: bash run: | - echo "package=${{ inputs.service }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV + echo "package=${{ inputs.package }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV - name: Rename package files shell: bash run: | - mv ${{ inputs.service }}.deb ${{ env.package }} + mv ${{ inputs.package}}.deb ${{ env.package }} - name: shasum packages shell: bash diff --git a/.github/actions/install-protobuf-compiler/action.yml b/.github/actions/install-protobuf-compiler/action.yml new file mode 100644 index 0000000000..4ef5c3fc6c --- /dev/null +++ b/.github/actions/install-protobuf-compiler/action.yml @@ -0,0 +1,13 @@ + +name: "Install protobuf compiler" +description: "Install compiler for protobuf compilation" + +runs: + using: "composite" + steps: + - name: Install protobuf compiler + shell: bash + run: | + set -eux + sudo apt-get update + sudo apt-get install -y protobuf-compiler diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 0e7fe0c073..b259c23fd9 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -12,38 +12,16 @@ permissions: jobs: docker-build: - strategy: - matrix: - component: [node] runs-on: Linux-ARM64-Runner - name: Build ${{ matrix.component }} steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Configure AWS credentials - if: github.event.pull_request.head.repo.fork == false - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: ${{ secrets.AWS_REGION }} - role-to-assume: ${{ secrets.AWS_ROLE }} - role-session-name: GithubActionsSession - - - name: Set cache parameters - if: github.event.pull_request.head.repo.fork == false - run: | - echo "CACHE_FROM=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - echo "CACHE_TO=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ 
secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - with: - cache-binary: true - - name: Build Docker image - uses: docker/build-push-action@v5 + - name: Build and push + uses: docker/build-push-action@v6 with: push: false - file: ./bin/${{ matrix.component }}/Dockerfile - cache-from: ${{ env.CACHE_FROM || '' }} - cache-to: ${{ env.CACHE_TO || '' }} + file: ./bin/node/Dockerfile + cache-from: type=gha + # Only save cache on push into next + cache-to: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' && 'type=gha,mode=max' || '' }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 016aeba77a..cf3ceddf67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,7 @@ on: env: # Shared prefix key for the rust cache. - # + # # This provides a convenient way to evict old or corrupted cache. RUST_CACHE_KEY: rust-cache-2026.02.02 # Reduce cache usage by removing debug information. @@ -47,8 +47,8 @@ jobs: - uses: actions/checkout@v6 - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 @@ -58,7 +58,44 @@ jobs: save-if: ${{ github.ref == 'refs/heads/next' }} - name: cargo build run: cargo build --workspace --all-targets --locked - + - name: Check static linkage + run: | + # Ensure database libraries are statically linked to avoid system library dependencies + # + # It explodes our possible dependency matrix when debugging, particularly + # in the case of sqlite and rocksdb as embedded databases, we want them + # shipped in identical versions we test with. 
Those are notoriously difficult + # to compile time configure and OSes make very opinionated choices. + metadata=$(cargo metadata --no-deps --format-version 1) + mapfile -t bin_targets < <( + echo "${metadata}" | jq -r '.packages[].targets[] | select(.kind[] == "bin") | .name' | sort -u + ) + if [[ ${#bin_targets[@]} -eq 0 ]]; then + echo "error: No binary targets found in cargo manifest." + exit 1 + fi + for bin_target in "${bin_targets[@]}"; do + # Ensure the binary was built by the previous step. + binary_path="target/debug/${bin_target}" + if ! [[ -x "${binary_path}" ]]; then + echo "error: Missing binary or missing executable bit: ${binary_path}"; + exit 2; + fi + # ldd exits non-zero for static binaries, so we inspect its output instead. + # if ldd fails we use an empty string instead + ldd_output="$(ldd "${binary_path}" 2>&1 || true)" + if echo "${ldd_output}" | grep -E -q 'not a dynamic executable'; then + continue + fi + # librocksdb/libsqlite entries indicate dynamic linkage (bad). + if echo "${ldd_output}" | grep -E -q 'librocksdb|libsqlite'; then + echo "error: Dynamic linkage detected for ${bin_target}." + echo "${ldd_output}" + exit 3 + fi + done + echo "Static linkage check passed for all of ${bin_targets[@]}" + clippy: name: lint - clippy runs-on: ubuntu-24.04 @@ -115,29 +152,6 @@ jobs: - name: Build docs run: cargo doc --no-deps --workspace --all-features --locked - # Ensures our checked-in protobuf generated code is aligned to the protobuf schema. - # - # We do this by rebuilding the generated code and ensuring there is no diff. 
- proto: - name: gRPC codegen - needs: [build] - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - - name: Rustup - run: rustup update --no-self-update - - name: Install protobuf - run: sudo apt-get update && sudo apt-get install -y protobuf-compiler - - uses: Swatinem/rust-cache@v2 - with: - shared-key: ${{ github.workflow }}-build - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false - - name: Rebuild protos - run: BUILD_PROTO=1 cargo check --all-features --all-targets --locked --workspace - - name: Diff check - run: git diff --exit-code - # Ensure the stress-test still functions by running some cheap benchmarks. stress-test: name: stress test @@ -167,11 +181,12 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - - name: Benchmark state sync - run: | - cargo run --bin miden-node-stress-test benchmark-store \ - --data-directory ${{ env.DATA_DIR }} \ - --iterations 10 --concurrency 1 sync-state + # TODO re-introduce + # - name: Benchmark state sync + # run: | + # cargo run --bin miden-node-stress-test benchmark-store \ + # --data-directory ${{ env.DATA_DIR }} \ + # --iterations 10 --concurrency 1 sync-state - name: Benchmark notes sync run: | cargo run --bin miden-node-stress-test benchmark-store \ diff --git a/.github/workflows/cleanup-workflows.yml b/.github/workflows/cleanup-workflows.yml new file mode 100644 index 0000000000..a7a6d2b428 --- /dev/null +++ b/.github/workflows/cleanup-workflows.yml @@ -0,0 +1,284 @@ +# Manual workflow to cleanup deleted workflows runs. +# +# Github keeps workflows runs around even if the workflow is deleted. +# This has the side effect that these still display in the UI which gets cluttered. +# Once the runs of a workflow are deleted, they also get removed from the UI. 
+name: Cleanup Workflow + +on: + workflow_dispatch: + inputs: + mode: + description: "Choose 'dry run' to preview or 'execute' to delete runs" + required: true + default: "dry run" + type: choice + options: + - "dry run" + - "execute" + +jobs: + cleanup: + name: Cleanup deleted workflows + runs-on: ubuntu-latest + permissions: + actions: write # required for deleting workflow runs + contents: read + + steps: + - name: Checkout repo + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Workflows on main + id: main + run: | + git fetch origin main + WORKFLOWS=$(git ls-tree -r origin/main --name-only | grep '^.github/workflows/') + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" + + - name: Workflows on next + id: next + run: | + git fetch origin next + WORKFLOWS=$(git ls-tree -r origin/next --name-only | grep '^.github/workflows/') + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" + + - name: Filter for deleted workflows + id: deleted + env: + GH_TOKEN: ${{ github.token }} + run: | + set -euo pipefail + + # Union of `main` and `next` workflows as a JSON array of strings (paths) + EXISTING=$(printf "%s\n%s\n" \ + "${{ steps.main.outputs.workflows }}" \ + "${{ steps.next.outputs.workflows }}" \ + ) + EXISTING=$(echo "$EXISTING" | sort -u | jq -R . | jq -s .) 
+ + echo "Existing workflows:" + echo "$EXISTING" + + # Get workflows currently on GitHub as JSON array of objects + GITHUB=$(gh api repos/{owner}/{repo}/actions/workflows \ + --jq '.workflows[] | select(.path | startswith(".github")) | { name, node_id, path }' \ + | jq -s '.') + + echo "Workflows on GitHub:" + echo "$GITHUB" + + # Find deleted workflows: present on GitHub but not in main/next + DELETED=$(echo "$GITHUB" | jq -c \ + --argjson existing "$EXISTING" ' + map(select(.path as $p | $existing | index($p) | not)) + ' + ) + + echo "Deleted workflows:" + echo "$DELETED" + + # Output to GitHub Actions + { + echo "workflows<> "$GITHUB_OUTPUT" + + # Performs the actual run deletion. + # + # This contains a lot of code, but the vast majority is just pretty-printing. + - name: Delete runs from deleted workflows + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + MODE: ${{ inputs.mode }} + WORKFLOWS: ${{ steps.deleted.outputs.workflows }} + OWNER: ${{ github.repository_owner }} + REPO: ${{ github.repository }} + shell: bash --noprofile --norc -euo pipefail {0} + run: | + if [ -z "$WORKFLOWS" ]; then + echo "No workflows to delete." + exit 0 + fi + + # ================================================================================================ + # Utility functions + # ================================================================================================ + + # Fetches a page of workflow runs for a given workflow ID and cursor. + # + # We use github's graphql API here which allows us to paginate over workflow runs. + # Unfortunately `gh run list` does not support pagination, so we use the graphql API instead. + gh_workflow_run_page() { + local id="$1" + local cursor="$2" + + gh api graphql -F workflowId="$id" -F after="$cursor" \ + -f query='query($workflowId: ID!, $after: String) { + node(id: $workflowId) { + ... 
on Workflow { + runs(first: 100, after: $after) { + pageInfo { hasNextPage endCursor } + nodes { databaseId } + } + } + } + }' + } + + # ================================================================================================ + # Print helpers for nice progress and table display + # ================================================================================================ + + # Column widths (table includes three spacers for ' | ' between columns) + widths_index=9 + widths_name=30 + widths_count=14 + widths_total=12 + widths_table=$(( $widths_index + 3 + $widths_name + 3 + $widths_count + 3 + $widths_total )) + + # Repeats a character a given number of times. + repeat_char() { + local char=$1 + local count=$2 + printf "%0.s$char" $(seq 1 $count) + } + + # Prints the given header as `====
====` to match the table layout. + print_table_header() { + local header="$1" + local header_len=${#header} + local left_pad=$(( ( $widths_table - header_len - 2) / 2 )) + local right_pad=$(( $widths_table - header_len - 2 - left_pad )) + printf " \n%s %s %s\n" $(repeat_char = $left_pad) "$header" $(repeat_char = $right_pad) + } + + # Prints |---+---+---+---| with appropriate widths to accomodate the table headers. + print_table_separator() { + printf "%s+%s+%s+%s\n" \ + "$(repeat_char - $((widths_index + 1)))" \ + "$(repeat_char - $((widths_name + 2)))" \ + "$(repeat_char - $((widths_count + 2)))" \ + "$(repeat_char - $((widths_total + 1)))" + } + + # Prints a row of the table (index, workflow name, workflow count, global total) + print_table_row() { + local index=$1 + local name=$2 + local count=$3 + local total=$4 + printf "%*s | %-*s | %*s | %*s\n" \ + "$widths_index" "$index" \ + "$widths_name" "$name" \ + "$widths_count" "$count" \ + "$widths_total" "$total" + } + + # Alias for print_table_row() with empty index and total columns. + print_summary_row() { + local name=$1 + local count=$2 + print_table_row "" "$name" "$count" "" + } + + # ================================================================================================ + # Print progress table header + # ================================================================================================ + print_table_header "Workflow Cleanup Progress" + print_table_row "Index" "Workflow" "Workflow Count" "Global Total" + print_table_separator + + # ================================================================================================ + # Core workflow loop, iterate over workflows + # ================================================================================================ + + n_workflows=$(echo "$WORKFLOWS" | jq -r '. 
| length') + total=0 + summary=() + index=0 + + mapfile -t WF_ARRAY < <(echo "$WORKFLOWS" | jq -c '.[]') + for wf in "${WF_ARRAY[@]}"; do + index=$((index + 1)) + name=$(echo "$wf" | jq -r '.name') + count=0 + id=$(echo "$wf" | jq -r '.node_id') + + # Safety checks + if [ -z "$name" ]; then + echo "::error title=Workflow name empty::Resolved workflow name is empty at index $index" + exit 1 + fi + if [ -z "$id" ]; then + echo "::error title=Workflow ID missing::Workflow '$name' has no ID" + exit 1 + fi + + cursor="" + + # Paginate over workflow runs + while true; do + response=$(gh_workflow_run_page "$id" "$cursor") + + run_ids=$(echo "$response" | jq -r '.data.node.runs.nodes[].databaseId') + has_next=$(echo "$response" | jq -r '.data.node.runs.pageInfo.hasNextPage') + cursor=$(echo "$response" | jq -r '.data.node.runs.pageInfo.endCursor') + + [ -z "$run_ids" ] && break + + deleted=$(echo "$run_ids" | wc -l | tr -d ' ') + count=$((count + deleted)) + total=$((total + deleted)) + + # Print progress + print_table_row "[$index/$n_workflows]" "$name" "$count" "$total" + + if [ "$MODE" = "execute" ]; then + for run_id in $run_ids; do + gh run delete "$run_id" >/dev/null + done + fi + + [ "$has_next" != "true" ] && break + done + + summary+=("$name|$count") + done + + # ================================================================================================ + # Print a summary table + # ================================================================================================ + print_table_header "Workflow Cleanup Summary" + print_summary_row "Workflow" "Runs" + print_table_separator + for entry in "${summary[@]}"; do + wf="${entry%%|*}" + count="${entry##*|}" + print_summary_row "$wf" "$count" + done + + # ================================================================================================ + # Print totals as a footer + # ================================================================================================ + print_table_separator + 
print_summary_row "TOTAL" "$total" + + if [ "$MODE" != "execute" ]; then + echo "Dry run complete. No runs were deleted." + else + echo "Cleanup complete." + fi diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index a5d6e3cae1..52cf042340 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -20,11 +20,11 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Rustup run: rustup install beta && rustup default beta - uses: taiki-e/install-action@v2 @@ -42,11 +42,11 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Install rust run: rustup update --no-self-update - name: Install cargo-hack @@ -54,15 +54,65 @@ jobs: - name: Check all feature combinations run: make check-features - # Check that our MSRV complies with our specified rust version. + workspace-packages: + name: list packages + runs-on: ubuntu-latest + outputs: + packages: ${{ steps.package-matrix.outputs.packages }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This prevents installing the toolchain version which isn't crucial for this operation. 
+ env: + RUSTUP_TOOLCHAIN: stable + steps: + - uses: actions/checkout@v6 + with: + ref: "next" + - name: Extract workspace packages + id: package-matrix + run: | + PACKAGES=$(cargo metadata --format-version 1 --no-deps \ + | jq -c ' + .workspace_members as $members + | .packages + | map(select(.id as $id | $members | index($id))) + | map(.name) + ') + + echo "packages=$PACKAGES" >> "$GITHUB_OUTPUT" + msrv: - name: msrv check - runs-on: ubuntu-24.04 + needs: workspace-packages + runs-on: ubuntu-latest + strategy: + matrix: + package: ${{ fromJson(needs.workspace-packages.outputs.packages) }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This is prevents issues where e.g. `cargo-msrv` requires a newer version of rust than the toolchain.toml version. + env: + RUSTUP_TOOLCHAIN: stable steps: - uses: actions/checkout@v6 with: - ref: 'next' - - name: check + ref: "next" + - name: Install binstall + uses: cargo-bins/cargo-binstall@main + - name: Install cargo-msrv + run: cargo binstall --no-confirm cargo-msrv + - name: Get manifest path for package + id: pkg + run: | + MANIFEST_PATH=$(cargo metadata --format-version 1 --no-deps \ + | jq -r ' + .packages[] + | select(.name == "${{ matrix.package }}") + | .manifest_path + ') + echo "manifest_path=$MANIFEST_PATH" >> "$GITHUB_OUTPUT" + - name: Show package info + run: | + echo "Package: ${{ matrix.package }}" + echo "Manifest path: ${{ steps.pkg.outputs.manifest_path }}" + cargo msrv show --manifest-path "${{ steps.pkg.outputs.manifest_path }}" + - name: Check MSRV run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh + cargo msrv verify --manifest-path "${{ steps.pkg.outputs.manifest_path }}" diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-crates.yml similarity index 63% rename from .github/workflows/publish-dry-run.yml rename to .github/workflows/publish-crates.yml index c84a08d34e..7fd35b712d 100644 --- a/.github/workflows/publish-dry-run.yml 
+++ b/.github/workflows/publish-crates.yml @@ -1,19 +1,15 @@ -name: Publish (dry-run) +name: Publish crates permissions: contents: read on: - push: - branches: [main, next] - -concurrency: - group: "${{ github.workflow }} @ ${{ github.ref }}" - cancel-in-progress: true + release: + types: [published] jobs: - publish-dry-run: - name: Cargo publish dry-run + publish-release: + name: Cargo publish release runs-on: Linux-ARM64-Runner if: ${{ github.repository_owner == '0xMiden' }} steps: @@ -21,14 +17,20 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + ref: ${{ github.event.release.tag_name }} + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler + - name: Log release info + run: | + echo "Publishing release ${{ github.event.release.tag_name }}" + echo "Commit: $(git rev-parse HEAD)" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - name: Install dependencies run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 - uses: taiki-e/install-action@v2 with: tool: cargo-binstall@1.16.6 @@ -38,7 +40,7 @@ jobs: run: | export PATH="$HOME/.cargo/bin:$PATH" ./scripts/check-msrv.sh - - name: Run cargo publish dry-run - run: cargo publish --workspace --dry-run + - name: Run cargo publish + run: cargo publish --workspace env: CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index a6d63d5035..3aea36b5c9 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -31,16 +31,16 @@ jobs: uses: actions/checkout@main with: fetch-depth: 0 - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: 
./.github/actions/install-protobuf-compiler - name: Build and Publish Node uses: ./.github/actions/debian with: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: node - service: miden-node - package: node + package: miden-node + packaging_dir: node crate: miden-node arch: ${{ matrix.arch }} @@ -62,8 +62,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover - package: prover + package: miden-prover + packaging_dir: prover crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -85,8 +85,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover-proxy - package: prover-proxy + package: miden-prover-proxy + packaging_dir: prover-proxy crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -108,7 +108,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: network-monitor - service: miden-network-monitor - package: network-monitor + package: miden-network-monitor + packaging_dir: network-monitor crate: miden-network-monitor arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 81e8d74475..be01b9d1e7 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -3,8 +3,8 @@ name: Publish Debian Package on: workflow_dispatch: inputs: - service: - description: "Name of service to publish" + package: + description: "Name of package to publish" required: true type: choice options: @@ -20,7 +20,7 @@ on: - network-monitor - node - remote-prover - package: + packaging_dir: required: true description: "Name of packaging directory" type: choice @@ -48,7 +48,7 @@ permissions: jobs: publish: - name: Publish ${{ inputs.service }} ${{ matrix.arch }} Debian + name: Publish ${{ inputs.package }} ${{ matrix.arch }} Debian strategy: matrix: arch: [amd64, arm64] @@ -60,8 +60,8 @@ 
jobs: with: fetch-depth: 0 - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-rocksdb + - uses: ./.github/actions/install-protobuf-compiler - name: Build and Publish Packages uses: ./.github/actions/debian @@ -69,7 +69,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ inputs.version }} crate_dir: ${{ inputs.crate_dir }} - service: ${{ inputs.service }} package: ${{ inputs.package }} + packaging_dir: ${{ inputs.packaging_dir }} crate: ${{ inputs.crate }} arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml deleted file mode 100644 index fcaab36a86..0000000000 --- a/.github/workflows/publish-main.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: Publish (main) - -permissions: - contents: read - -on: - release: - types: [published] - -jobs: - publish-release: - name: Cargo publish release - runs-on: Linux-ARM64-Runner - if: ${{ github.repository_owner == '0xMiden' }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: main - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - # Ensure the release tag refers to the latest commit on main. - # Compare the commit SHA that triggered the workflow with the HEAD of the branch we just - # checked out (main). - - name: Verify release was triggered from main HEAD - run: | - tag_sha="${{ github.sha }}" - main_sha="$(git rev-parse HEAD)" - - echo "Tag points to: $tag_sha" - echo "Current main HEAD is: $main_sha" - - if [ "$tag_sha" != "$main_sha" ]; then - echo "::error::The release tag was not created from the latest commit on main. Aborting." - exit 1 - fi - echo "Release tag matches main HEAD — continuing." 
- - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install dependencies - run: sudo apt-get update && sudo apt-get install -y jq - - name: Update Rust toolchain - run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - - uses: taiki-e/install-action@v2 - with: - tool: cargo-binstall@1.16.6 - - name: Install cargo-msrv - run: cargo binstall --no-confirm --force cargo-msrv - - name: Check MSRV for each workspace member - run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh - - name: Run cargo publish - run: cargo publish --workspace - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/.gitignore b/.gitignore index 0a086d3d0b..a4d92ce8ed 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ # will have compiled files and executables debug/ target/ +miden-node-stress-test-* # Generated by protox `file_descriptor_set.bin` *.bin diff --git a/CHANGELOG.md b/CHANGELOG.md index 2dc3173446..7f4644a3cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # Changelog +## v0.14.0 (TBD) + +### Enhancements + +- [BREAKING] Move block proving from Blocker Producer to the Store ([#1579](https://github.com/0xMiden/node/pull/1579)). +- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/node/pull/1595)). +- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/node/pull/1614)). +- [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/node/issues/1591)). +- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/node/issues/1591)). 
+- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/node/issues/1701)). +- Added KMS signing support in validator ([#1677](https://github.com/0xMiden/node/pull/1677)). + +### Changes + +- [BREAKING] Removed obsolete `SyncState` RPC endpoint; clients should use `SyncNotes`, `SyncNullifiers`, `SyncAccountVault`, `SyncAccountStorageMaps`, `SyncTransactions`, or `SyncChainMmr` instead ([#1636](https://github.com/0xMiden/node/pull/1636)). +- Added account ID limits for `SyncTransactions`, `SyncAccountVault`, and `SyncAccountStorageMaps` to `GetLimits` responses ([#1636](https://github.com/0xMiden/node/pull/1636)). +- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/node/pull/1646)). +- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/node/pull/1594)). +- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/node/pull/1610)). +- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/node/pull/1611)). +- Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/node/pull/1651)). +- Added support for generic account loading at genesis ([#1624](https://github.com/0xMiden/node/pull/1624)). 
+- Improved tracing span fields ([#1650](https://github.com/0xMiden/node/pull/1650)) + - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/node/pull/1662)). +- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/node/pull/1688)). +- [BREAKING] Renamed `NoteRoot` protobuf message used in `GetNoteScriptByRoot` gRPC endpoints into `NoteScriptRoot` ([#1722](https://github.com/0xMiden/node/pull/1722)). + +### Fixes + +- Fixed network monitor looping on stale wallet nonce after node restarts by re-syncing wallet state from RPC after repeated failures ([#1748](https://github.com/0xMiden/node/pull/1748)). +- Fixed `bundled start` panicking due to duplicate `data_directory` clap argument name between `BundledCommand::Start` and `NtxBuilderConfig` ([#1732](https://github.com/0xMiden/node/pull/1732)). +- Fixed `bundled bootstrap` requiring `--validator.key.hex` or `--validator.key.kms-id` despite a default key being configured ([#1732](https://github.com/0xMiden/node/pull/1732)). +- Fixed incorrectly classifying private notes with the network attachment as network notes ([#1378](https://github.com/0xMiden/node/pull/1738)). + ## v0.13.7 (2026-02-25) - Updated `SyncAccountStorageMaps` and `SyncAccountVault` to allow all accounts with public state, including network accounts ([#1711](https://github.com/0xMiden/node/pull/1711)). @@ -10,43 +44,44 @@ ## v0.13.5 (2026-02-19) -- OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/miden-node/pull/1643)). -- Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/miden-node/pull/1660)). 
-- Debian packages now include debug symbols ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). -- Debian packages now have coredumps enabled ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). -- Fixed storage map keys not being hashed before insertion into the store's SMT forest ([#1681](https://github.com/0xMiden/miden-node/pull/1681)). +- OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/node/pull/1643)). +- Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/node/pull/1660)). +- Debian packages now include debug symbols ([#1666](https://github.com/0xMiden/node/pull/1666)). +- Debian packages now have coredumps enabled ([#1666](https://github.com/0xMiden/node/pull/1666)). +- Fixed storage map keys not being hashed before insertion into the store's SMT forest ([#1681](https://github.com/0xMiden/node/pull/1681)). ## v0.13.4 (2026-02-04) -- Fixed network monitor displaying explorer URL as a "null" hyperlink when unset ([#1617](https://github.com/0xMiden/miden-node/pull/1617)). -- Fixed empty storage maps not being inserted into `storage_entries` table when inserting storage delta ([#1642](https://github.com/0xMiden/miden-node/pull/1642)). +- Fixed network monitor displaying explorer URL as a "null" hyperlink when unset ([#1617](https://github.com/0xMiden/node/pull/1617)). +- Fixed empty storage maps not being inserted into `storage_entries` table when inserting storage delta ([#1642](https://github.com/0xMiden/node/pull/1642)). ## v0.13.3 (2026-01-29) -- Fixed network monitor faucet test failing to parse `/get_metadata` response due to field type mismatches ([#1612](https://github.com/0xMiden/miden-node/pull/1612)). +- Fixed network monitor faucet test failing to parse `/get_metadata` response due to field type mismatches ([#1612](https://github.com/0xMiden/node/pull/1612)). 
## v0.13.2 (2026-01-27) -- Network transaction builder no longer creates conflicting transactions by consuming the same notes twice ([#1597](https://github.com/0xMiden/miden-node/issues/1597)). +- Network transaction builder no longer creates conflicting transactions by consuming the same notes twice ([#1597](https://github.com/0xMiden/node/issues/1597)). ## v0.13.1 (2026-01-27) ### Enhancements -- Bootstrap's genesis configuration file now allows eliding `wallet` and `fungible_faucet` fields ([#1590](https://github.com/0xMiden/miden-node/pull/1590)). -- Updated miden-base dependencies to version 0.13.3 ([#1601](https://github.com/0xMiden/miden-node/pull/1601)). +- Bootstrap's genesis configuration file now allows eliding `wallet` and `fungible_faucet` fields ([#1590](https://github.com/0xMiden/node/pull/1590)). +- Updated miden-base dependencies to version 0.13.3 ([#1601](https://github.com/0xMiden/node/pull/1601)). ### Fixes -- Bootstrap's genesis configuration file is now optional again ([#1590](https://github.com/0xMiden/miden-node/pull/1590)). -- Network transaction builder fails if output notes are created ([#1599](https://github.com/0xMiden/miden-node/pull/1599)). -- Fixed the copy button in the network monitor ([#1600](https://github.com/0xMiden/miden-node/pull/1600)). -- Network transaction builder now loads foreign account code into the MAST store when consuming network notes ([#1598](https://github.com/0xMiden/miden-node/pull/1598)). +- Bootstrap's genesis configuration file is now optional again ([#1590](https://github.com/0xMiden/node/pull/1590)). +- Network transaction builder fails if output notes are created ([#1599](https://github.com/0xMiden/node/pull/1599)). +- Fixed the copy button in the network monitor ([#1600](https://github.com/0xMiden/node/pull/1600)). +- Network transaction builder now loads foreign account code into the MAST store when consuming network notes ([#1598](https://github.com/0xMiden/node/pull/1598)). 
## v0.13.0 (2026-01-23) ### Enhancements +- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/miden-node/issues/1304)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). @@ -68,206 +103,209 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Improve speed of account updates ([#1567](https://github.com/0xMiden/miden-node/pull/1567)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)). - Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). -- Pined tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)).
-- Added `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). -- Added check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)). ### Changes -- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). -- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). -- Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). -- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/miden-node/issues/1185)). -- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). -- Normalized response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). -- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). -- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). -- Removed `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). -- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). -- Added `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). 
-- Refactored account table and introduce tracking forest ([#1394](https://github.com/0xMiden/miden-node/pull/1394)). -- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). -- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). -- [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). -- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). -- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). -- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). -- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). -- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). -- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). -- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). -- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). -- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). -- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). 
-- [BREAKING] Renamed `SyncStorageMaps` RPC endpoint to `SyncAccountStorageMaps` for consistency ([#1581](https://github.com/0xMiden/miden-node/pull/1581)). -- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/miden-node/pull/1576)). -- [BREAKING] Renamed `GetNetworkAccountDetailsByPrefix` endpoint to `GetNetworkAccountDetailsById` which now accepts full account ID instead of 30-bit prefix ([#1580](https://github.com/0xMiden/miden-node/pull/1580)). -- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/miden-node/pull/1569)). +- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/node/pull/1366)). +- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/node/pull/1388)). +- Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/node/pull/1441)). +- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/node/issues/1185)). +- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/node/pull/1357)). +- Normalized response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/node/pull/1357)). +- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/node/pull/1348)). +- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/node/pull/1294)). +- Removed `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/node/issues/1352)). +- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/node/pull/1298), [#1436](https://github.com/0xMiden/node/pull/1436)). 
+- Added `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/node/issues/1353)). +- Refactored account table and introduce tracking forest ([#1394](https://github.com/0xMiden/node/pull/1394)). +- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/node/pull/1401)). +- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/node/pull/1424)). +- [BREAKING] Added block signing capabilities to Validator component and updated genesis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/node/pull/1426)). +- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/node/pull/1430)). +- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/node/pull/1438)). +- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/node/pull/1438)). +- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/node/pull/1443)). +- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/node/pull/1476)). +- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/node/pull/1481)). +- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/node/pull/1497)). +- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/node/pull/1503)). +- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/node/pull/1526)). +- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/node/pull/1572)).
+- [BREAKING] Renamed `SyncStorageMaps` RPC endpoint to `SyncAccountStorageMaps` for consistency ([#1581](https://github.com/0xMiden/node/pull/1581)). +- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/node/pull/1576)). +- [BREAKING] Renamed `GetNetworkAccountDetailsByPrefix` endpoint to `GetNetworkAccountDetailsById` which now accepts full account ID instead of 30-bit prefix ([#1580](https://github.com/0xMiden/node/pull/1580)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/node/pull/1569)). ### Fixes -- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). -- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). -- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). -- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). -- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). -- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). -- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). -- gRPC traces now correctly connect to the method implementation ([1553](https://github.com/0xMiden/miden-node/pull/1553)). -- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/miden-node/pull/1578)).
+- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/node/pull/1370)). +- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/node/pull/1403)). +- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/node/pull/1407)). +- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/node/issues/1422)). +- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/node/pull/1461)). +- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/node/pull/1489)). +- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/node/pull/1501)). +- gRPC traces now correctly connect to the method implementation ([#1553](https://github.com/0xMiden/node/pull/1553)). +- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/node/pull/1578)). ## v0.12.8 (2026-01-15) ### Enhancements -- Enable traces within database closures ([#1511](https://github.com/0xMiden/miden-node/pull/1511)). +- Enable traces within database closures ([#1511](https://github.com/0xMiden/node/pull/1511)). ## v0.12.7 (2026-01-15) ### Enhancements -- Emit database table size metrics ([#1511](https://github.com/0xMiden/miden-node/pull/1511)). -- Improved telemetry in the network transaction builder ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Improved telemetry in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). +- Emit database table size metrics ([#1511](https://github.com/0xMiden/node/pull/1511)).
+- Improved telemetry in the network transaction builder ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Improved telemetry in the store's `apply_block` ([#1508](https://github.com/0xMiden/node/pull/1508)). ### Fixes -- Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). -- Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - - This presented as a database locked error and in rare cases a desync between the mempool and store. +- Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/node/pull/1508)). +- Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/node/pull/1508)). + - This presented as a database locked error and in rare cases a desync between the mempool and store. ## v0.12.6 (2026-01-12) ### Enhancements -- Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/miden-node/pull/1373)). -- Improve telemetry in the store ([#1504](https://github.com/0xMiden/miden-node/pull/1504)). +- Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/node/pull/1373)). +- Improve telemetry in the store ([#1504](https://github.com/0xMiden/node/pull/1504)). ### Fixes -- Block producer crash caused by pass through transactions ([#1396](https://github.com/0xMiden/miden-node/pull/1396)). +- Block producer crash caused by pass through transactions ([#1396](https://github.com/0xMiden/node/pull/1396)). 
## v0.12.5 (2025-11-27) -- Actually update `miden-base` dependencies ([#1384](https://github.com/0xMiden/miden-node/pull/1384)). +- Actually update `miden-base` dependencies ([#1384](https://github.com/0xMiden/node/pull/1384)). ## v0.12.4 (2025-11-27) -- Split counter increment and tracking services in `miden-network-monitor` binary ([#1362](https://github.com/0xMiden/miden-node/pull/1362)). -- Updated the counter account from the `miden-network-monitor` to start at 0 ([#1367](https://github.com/0xMiden/miden-node/pull/1367)). -- Updated `miden-base` dependencies to fix ECDSA issues ([#1382](https://github.com/0xMiden/miden-node/pull/1382)). +- Split counter increment and tracking services in `miden-network-monitor` binary ([#1362](https://github.com/0xMiden/node/pull/1362)). +- Updated the counter account from the `miden-network-monitor` to start at 0 ([#1367](https://github.com/0xMiden/node/pull/1367)). +- Updated `miden-base` dependencies to fix ECDSA issues ([#1382](https://github.com/0xMiden/node/pull/1382)). ## v0.12.3 (2025-11-15) -- Added configurable timeout support to `RemoteBatchProver`, `RemoteBlockProver`, and `RemoteTransactionProver` clients ([#1365](https://github.com/0xMiden/miden-node/pull/1365)). -- Added configurable timeout support to `miden-network-monitor` binary ([#1365](https://github.com/0xMiden/miden-node/pull/1365)). +- Added configurable timeout support to `RemoteBatchProver`, `RemoteBlockProver`, and `RemoteTransactionProver` clients ([#1365](https://github.com/0xMiden/node/pull/1365)). +- Added configurable timeout support to `miden-network-monitor` binary ([#1365](https://github.com/0xMiden/node/pull/1365)). ## v0.12.2 (2025-11-12) -- Fixed `PoW` challenge solving in `miden-network-monitor` binary ([#1363](https://github.com/0xMiden/miden-node/pull/1363)). +- Fixed `PoW` challenge solving in `miden-network-monitor` binary ([#1363](https://github.com/0xMiden/node/pull/1363)). 
## v0.12.1 (2025-11-08) -- Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). -- Improves `.env` file example in for the `miden-network-monitor` binary ([#1345](https://github.com/0xMiden/miden-node/pull/1345)). +- Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/node/pull/1295)). +- Improves `.env` file example for the `miden-network-monitor` binary ([#1345](https://github.com/0xMiden/node/pull/1345)). ## v0.12.0 (2025-11-06) ### Changes - [BREAKING] Updated MSRV to 1.90. -- [BREAKING] Refactored `CheckNullifiersByPrefix` endpoint adding pagination ([#1191](https://github.com/0xMiden/miden-node/pull/1191)). -- [BREAKING] Renamed `CheckNullifiersByPrefix` endpoint to `SyncNullifiers` ([#1191](https://github.com/0xMiden/miden-node/pull/1191)). -- Added `GetNoteScriptByRoot` gRPC endpoint for retrieving a note script by its root ([#1196](https://github.com/0xMiden/miden-node/pull/1196)). -- [BREAKING] Added `block_range` and `pagination_info` fields to paginated gRPC endpoints ([#1205](https://github.com/0xMiden/miden-node/pull/1205)). -- Implemented usage of `tonic` error codes for gRPC errors ([#1208](https://github.com/0xMiden/miden-node/pull/1208)). -- [BREAKING] Replaced `GetAccountProofs` with `GetAccountProof` in the public store API (#[1211](https://github.com/0xMiden/miden-node/pull/1211)). -- Implemented storage map `DataStore` function ([#1226](https://github.com/0xMiden/miden-node/pull/1226)). -- [BREAKING] Refactored the mempool to use a single DAG across transactions and batches ([#1234](https://github.com/0xMiden/miden-node/pull/1234)). -- [BREAKING] Renamed `RemoteProverProxy` to `RemoteProverClient` ([#1236](https://github.com/0xMiden/miden-node/pull/1236)). -- Added pagination to `SyncNotes` endpoint ([#1257](https://github.com/0xMiden/miden-node/pull/1257)).
-- Added application level error in gRPC endpoints ([#1266](https://github.com/0xMiden/miden-node/pull/1266)). -- Added `deploy-account` command to `miden-network-monitor` binary ([#1276](https://github.com/0xMiden/miden-node/pull/1276)). -- [BREAKING] Response type nuances of `GetAccountProof` in the public store API (#[1277](https://github.com/0xMiden/miden-node/pull/1277)). -- Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)). -- Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)). -- [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)). -- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/miden-node/pull/1326)). -- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)). -- Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)). -- Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)). +- [BREAKING] Refactored `CheckNullifiersByPrefix` endpoint adding pagination ([#1191](https://github.com/0xMiden/node/pull/1191)). +- [BREAKING] Renamed `CheckNullifiersByPrefix` endpoint to `SyncNullifiers` ([#1191](https://github.com/0xMiden/node/pull/1191)). +- Added `GetNoteScriptByRoot` gRPC endpoint for retrieving a note script by its root ([#1196](https://github.com/0xMiden/node/pull/1196)). +- [BREAKING] Added `block_range` and `pagination_info` fields to paginated gRPC endpoints ([#1205](https://github.com/0xMiden/node/pull/1205)). 
+- Implemented usage of `tonic` error codes for gRPC errors ([#1208](https://github.com/0xMiden/node/pull/1208)). +- [BREAKING] Replaced `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/node/pull/1211)). +- Implemented storage map `DataStore` function ([#1226](https://github.com/0xMiden/node/pull/1226)). +- [BREAKING] Refactored the mempool to use a single DAG across transactions and batches ([#1234](https://github.com/0xMiden/node/pull/1234)). +- [BREAKING] Renamed `RemoteProverProxy` to `RemoteProverClient` ([#1236](https://github.com/0xMiden/node/pull/1236)). +- Added pagination to `SyncNotes` endpoint ([#1257](https://github.com/0xMiden/node/pull/1257)). +- Added application level error in gRPC endpoints ([#1266](https://github.com/0xMiden/node/pull/1266)). +- Added `deploy-account` command to `miden-network-monitor` binary ([#1276](https://github.com/0xMiden/node/pull/1276)). +- [BREAKING] Response type nuances of `GetAccountProof` in the public store API ([#1277](https://github.com/0xMiden/node/pull/1277)). +- Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution ([#1278](https://github.com/0xMiden/node/pull/1278)). +- Added `validator` crate with initial protobuf, gRPC server, and sub-command ([#1293](https://github.com/0xMiden/node/pull/1293)). +- [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/node/pull/1292)). +- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/node/pull/1326)). +- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/node/pull/1333)). +- Implement `DataStore::get_note_script()` for `NtxDataStore` ([#1332](https://github.com/0xMiden/node/pull/1332)).
+- Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/node/pull/1338)). ## v0.11.3 (2025-11-04) -- Reduced note retries to 1 ([#1308](https://github.com/0xMiden/miden-node/pull/1308)). -- Address network transaction builder (NTX) invariant breaking for unavailable accounts ([#1312](https://github.com/0xMiden/miden-node/pull/1312)). -- Tweaked HTTP configurations on the pingora proxy server ([#1281](https://github.com/0xMiden/miden-node/pull/1281)). -- Added the counter increment task to `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). +- Reduced note retries to 1 ([#1308](https://github.com/0xMiden/node/pull/1308)). +- Address network transaction builder (NTX) invariant breaking for unavailable accounts ([#1312](https://github.com/0xMiden/node/pull/1312)). +- Tweaked HTTP configurations on the pingora proxy server ([#1281](https://github.com/0xMiden/node/pull/1281)). +- Added the counter increment task to `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/node/pull/1295)). ## v0.11.2 (2025-09-10) -- Added support for keepalive requests against base path `/` of RPC server ([#1212](https://github.com/0xMiden/miden-node/pull/1212)). -- [BREAKING] Replace `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/miden-node/pull/1211)). -- [BREAKING] Optimize `GetAccountProof` for small accounts ([#1185](https://github.com/0xMiden/miden-node/pull/1185)). +- Added support for keepalive requests against base path `/` of RPC server ([#1212](https://github.com/0xMiden/node/pull/1212)). +- [BREAKING] Replace `GetAccountProofs` with `GetAccountProof` in the public store API ([#1211](https://github.com/0xMiden/node/pull/1211)). +- [BREAKING] Optimize `GetAccountProof` for small accounts ([#1185](https://github.com/0xMiden/node/pull/1185)). 
## v0.11.1 (2025-09-08) - Removed decorators from scripts when submitting transactions and batches, and inserting notes into the DB ([#1194](https://github.com/ -0xMiden/miden-node/pull/1194)). +0xMiden/node/pull/1194)). - Refresh `miden-base` dependencies. -- Added `SyncTransactions` gRPC endpoint for retrieving transactions for specific accounts within a block range ([#1224](https://github.com/0xMiden/miden-node/pull/1224)). -- Added `miden-network-monitor` binary for monitoring the Miden network ([#1217](https://github.com/0xMiden/miden-node/pull/1217)). +- Added `SyncTransactions` gRPC endpoint for retrieving transactions for specific accounts within a block range ([#1224](https://github.com/0xMiden/node/pull/1224)). +- Added `miden-network-monitor` binary for monitoring the Miden network ([#1217](https://github.com/0xMiden/node/pull/1217)). ## v0.11.0 (2025-08-28) ### Enhancements -- Added environment variable support for batch and block size CLI arguments ([#1081](https://github.com/0xMiden/miden-node/pull/1081)). -- RPC accept header now supports specifying the genesis commitment in addition to the RPC version. This lets clients ensure they are on the right network ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- A transaction's account delta is now checked against its commitments in `SubmitProvenTransaction` endpoint ([#1093](https://github.com/0xMiden/miden-node/pull/1093)). -- Added check for Account Id prefix uniqueness when transactions to create accounts are submitted to the mempool ([#1094](https://github.com/0xMiden/miden-node/pull/1094)). -- Added benchmark CLI sub-command for the `miden-store` component to measure the state load time ([#1154](https://github.com/0xMiden/miden-node/pull/1154)). 
-- Retry failed network notes with exponential backoff instead of immediately ([#1116](https://github.com/0xMiden/miden-node/pull/1116)) -- Network notes are now dropped after failing 30 times ([#1116](https://github.com/0xMiden/miden-node/pull/1116)) -- gRPC server timeout is now configurable (defaults to `10s`) ([#1133](https://github.com/0xMiden/miden-node/pull/1133)) -- [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/miden-node/pull/#1045)). -- Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/miden-node/pull/1140), [#1132](https://github.com/0xMiden/miden-node/pull/1132)). -- Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/miden-node/pull/1176)). -- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/miden-node/pull/1219)). +- Added environment variable support for batch and block size CLI arguments ([#1081](https://github.com/0xMiden/node/pull/1081)). +- RPC accept header now supports specifying the genesis commitment in addition to the RPC version. This lets clients ensure they are on the right network ([#1084](https://github.com/0xMiden/node/pull/1084)). +- A transaction's account delta is now checked against its commitments in `SubmitProvenTransaction` endpoint ([#1093](https://github.com/0xMiden/node/pull/1093)). +- Added check for Account Id prefix uniqueness when transactions to create accounts are submitted to the mempool ([#1094](https://github.com/0xMiden/node/pull/1094)). +- Added benchmark CLI sub-command for the `miden-store` component to measure the state load time ([#1154](https://github.com/0xMiden/node/pull/1154)). 
+- Retry failed network notes with exponential backoff instead of immediately ([#1116](https://github.com/0xMiden/node/pull/1116)) +- Network notes are now dropped after failing 30 times ([#1116](https://github.com/0xMiden/node/pull/1116)) +- gRPC server timeout is now configurable (defaults to `10s`) ([#1133](https://github.com/0xMiden/node/pull/1133)) +- [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/node/pull/#1045)). +- Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/node/pull/1140), [#1132](https://github.com/0xMiden/node/pull/1132)). +- Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/node/pull/1176)). +- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/node/pull/1219)). ### Changes - [BREAKING] Updated MSRV to 1.88. -- [BREAKING] De-duplicate storage of code in DB (no-migration) ([#1083](https://github.com/0xMiden/miden-node/issue/#1083)). -- [BREAKING] RPC accept header format changed from `application/miden.vnd+grpc.` to `application/vnd.miden; version=` ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- [BREAKING] Integrated `FeeParameters` into block headers. ([#1122](https://github.com/0xMiden/miden-node/pull/1122)). -- [BREAKING] Genesis configuration now supports fees ([#1157](https://github.com/0xMiden/miden-node/pull/1157)). +- [BREAKING] De-duplicate storage of code in DB (no-migration) ([#1083](https://github.com/0xMiden/node/issue/#1083)). +- [BREAKING] RPC accept header format changed from `application/miden.vnd+grpc.` to `application/vnd.miden; version=` ([#1084](https://github.com/0xMiden/node/pull/1084)). +- [BREAKING] Integrated `FeeParameters` into block headers. ([#1122](https://github.com/0xMiden/node/pull/1122)). 
+- [BREAKING] Genesis configuration now supports fees ([#1157](https://github.com/0xMiden/node/pull/1157)). - Configure `NativeFaucet`, which determines the native asset used to pay fees - Configure the base verification fee - Note: fees are not yet activated, and this has no impact beyond setting these values in the block headers -- [BREAKING] Remove public store API `GetAccountStateDelta` ([#1162](https://github.com/0xMiden/miden-node/pull/1162)). -- Removed `faucet` binary ([#1172](https://github.com/0xMiden/miden-node/pull/1172)). -- Add `genesis_commitment` in `Status` response ([#1181](https://github.com/0xMiden/miden-node/pull/1181)). +- [BREAKING] Remove public store API `GetAccountStateDelta` ([#1162](https://github.com/0xMiden/node/pull/1162)). +- Removed `faucet` binary ([#1172](https://github.com/0xMiden/node/pull/1172)). +- Add `genesis_commitment` in `Status` response ([#1181](https://github.com/0xMiden/node/pull/1181)). ### Fixes - [BREAKING] Integrated proxy status endpoint into main proxy service, removing separate status port. -- RPC requests with wildcard (`*/*`) media-type are not longer rejected ([#1084](https://github.com/0xMiden/miden-node/pull/1084)). -- Stress-test CLI account now properly sets the storage mode and increment nonce in transactions ([#1113](https://github.com/0xMiden/miden-node/pull/1113)). -- [BREAKING] Update `notes` table schema to have a nullable `consumed_block_num` ([#1100](https://github.com/0xMiden/miden-node/pull/1100)). -- Network Transaction Builder now correctly discards non-single-target network notes instead of panicking ([#1166](https://github.com/0xMiden/miden-node/pull/1166)). +- RPC requests with wildcard (`*/*`) media-type are not longer rejected ([#1084](https://github.com/0xMiden/node/pull/1084)). +- Stress-test CLI account now properly sets the storage mode and increment nonce in transactions ([#1113](https://github.com/0xMiden/node/pull/1113)). 
+- [BREAKING] Update `notes` table schema to have a nullable `consumed_block_num` ([#1100](https://github.com/0xMiden/node/pull/1100)). +- Network Transaction Builder now correctly discards non-single-target network notes instead of panicking ([#1166](https://github.com/0xMiden/node/pull/1166)). ### Removed -- Moved the `miden-faucet` binary to the [`miden-faucet` repository](https://github.com/0xmiden/miden-faucet) ([#1179](https://github.com/0xMiden/miden-node/pull/1179)). +- Moved the `miden-faucet` binary to the [`miden-faucet` repository](https://github.com/0xmiden/miden-faucet) ([#1179](https://github.com/0xMiden/node/pull/1179)). ## v0.10.1 (2025-07-14) ### Fixes -- Network accounts are no longer disabled after one transaction ([#1086](https://github.com/0xMiden/miden-node/pull/1086)). +- Network accounts are no longer disabled after one transaction ([#1086](https://github.com/0xMiden/node/pull/1086)). ## v0.10.0 (2025-07-10) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 94e6830753..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,126 +0,0 @@ -# Contributing to Miden Node - -#### First off, thanks for taking the time to contribute! - -We want to make contributing to this project as easy and transparent as possible, whether it's: - -- Reporting a [bug](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=bug&projects=&template=1-bugreport.yml) -- Taking part in [discussions](https://github.com/0xMiden/miden-node/discussions) -- Submitting a [fix](https://github.com/0xMiden/miden-node/pulls) -- Proposing new [features](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=enhancement&projects=&template=2-feature-request.yml) - -  - -## Flow - -We are using [Github Flow](https://docs.github.com/en/get-started/quickstart/github-flow), so all code changes happen through pull requests from a [forked repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). 
- -### Branching - -- The current active branch is `next`. Every branch with a fix/feature must be forked from `next`. - -- The branch name should contain a short issue/feature description separated with hyphens [(kebab-case)](https://en.wikipedia.org/wiki/Letter_case#Kebab_case). - - For example, if the issue title is `Fix functionality X in component Y` then the branch name will be something like: `fix-x-in-y`. - -- New branch should be rebased from `next` before submitting a PR in case there have been changes to avoid merge commits. i.e. this branches state: - - ``` - A---B---C fix-x-in-y - / - D---E---F---G next - | | - (F, G) changes happened after `fix-x-in-y` forked - ``` - - should become this after rebase: - - ``` - A'--B'--C' fix-x-in-y - / - D---E---F---G next - ``` - - More about rebase [here](https://git-scm.com/docs/git-rebase) and [here](https://www.atlassian.com/git/tutorials/rewriting-history/git-rebase#:~:text=What%20is%20git%20rebase%3F,of%20a%20feature%20branching%20workflow.) - -### Commit messages - -- Commit messages should be written in a short, descriptive manner and be prefixed with tags for the change type and scope (if possible) according to the [semantic commit](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) scheme. For example, a new change to the `miden-node-store` crate might have the following message: `feat(miden-node-store): fix block-headers database schema` - -- Also squash commits to logically separated, distinguishable stages to keep git log clean: - - ``` - 7hgf8978g9... Added A to X \ - \ (squash) - gh354354gh... oops, typo --- * ---------> 9fh1f51gh7... feat(X): add A && B - / - 85493g2458... Added B to X / - - - 789fdfffdf... Fixed D in Y \ - \ (squash) - 787g8fgf78... blah blah --- * ---------> 4070df6f00... fix(Y): fixed D && C - / - 9080gf6567... 
Fixed C in Y / - ``` - -### Code Style and Documentation - -- For documentation in the codebase, we follow the [rustdoc](https://doc.rust-lang.org/rust-by-example/meta/doc.html) convention with no more than 100 characters per line. -- For code sections, we use code separators like the following to a width of 100 characters:: - - ``` - // CODE SECTION HEADER - // ================================================================================ - ``` - -- [Rustfmt](https://github.com/rust-lang/rustfmt), [Clippy](https://github.com/rust-lang/rust-clippy) and [Rustdoc](https://doc.rust-lang.org/rustdoc/index.html) linting is included in CI pipeline. Anyways it's preferable to run linting locally before push. To simplify running these commands in a reproducible manner we use `make` commands, you can run: - - ``` - make lint - ``` - -You can find more information about the `make` commands in the [Makefile](Makefile) - -### Testing - -After writing code different types of tests (unit, integration, end-to-end) are required to make sure that the correct behavior has been achieved and that no bugs have been introduced. You can run tests using the following command: - -``` -make test -``` - -### Versioning - -We use [semver](https://semver.org/) naming convention. - -  - -## Pre-PR checklist - -To make sure all commits adhere to our programming standards please follow the checklist: - -1. Repo forked and branch created from `next` according to the naming convention. -2. Commit messages and code style follow conventions. -3. Tests added for new functionality. -4. Documentation/comments updated for all changes according to our documentation convention. -5. Spellchecking ([typos](https://github.com/crate-ci/typos/tree/master?tab=readme-ov-file#install)), Rustfmt, Clippy and Rustdoc linting passed (run with `make lint`). -6. New branch rebased from `next`. 
- -  - -## Write bug reports with detail, background, and sample code - -**Great Bug Reports** tend to have: - -- A quick summary and/or background -- Steps to reproduce -- What you expected would happen -- What actually happens -- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) - -  - -## Any contributions you make will be under the MIT Software License - -In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern. diff --git a/Cargo.lock b/Cargo.lock index e8e6c19983..6bc2f415f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,19 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "getrandom 0.3.4", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.4" @@ -50,18 +37,12 @@ dependencies = [ ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" +name = "alloca" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" dependencies = [ - "alloc-no-stdlib", + "cc", ] [[package]] @@ -144,15 +125,6 @@ dependencies = [ "backtrace", ] -[[package]] -name = "arc-swap" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" -dependencies = [ - "rustversion", -] - [[package]] name = "arrayref" version = "0.3.9" @@ -192,36 +164,405 @@ dependencies = [ ] [[package]] -name = "atomic" -version = "0.6.1" +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-config" +version = "1.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +checksum = "8a8fc176d53d6fe85017f230405e3255cedb4a02221cb55ed6d76dccbbb099b2" dependencies = [ - "bytemuck", + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 1.4.0", + "ring", + "time", + "tokio", + "tracing", + "url", + "zeroize", ] [[package]] -name = "atomic-waker" -version = "1.1.2" +name = "aws-credential-types" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +checksum = "e26bbf46abc608f2dc61fd6cb3b7b0665497cc259a21520151ed98f8b37d2c79" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] [[package]] -name = "atty" -version = "0.2.14" +name = "aws-lc-rs" +version = "1.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +checksum = 
"7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", + "aws-lc-sys", + "zeroize", ] [[package]] -name = "autocfg" -version = "1.5.0" +name = "aws-lc-sys" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "aws-runtime" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0f92058d22a46adf53ec57a6a96f34447daf02bff52e8fb956c66bcd5c6ac12" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "bytes-utils", + "fastrand", + "http 1.4.0", + "http-body 1.0.1", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-kms" +version = "1.100.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723700afe7459a33d1ac30852e9208b801946c032625cc8c808f57b9563bb5c7" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.94.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "699da1961a289b23842d88fe2984c6ff68735fdf9bdcbc69ceaeb2491c9bf434" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + 
"aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.96.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3e3a4cb3b124833eafea9afd1a6cc5f8ddf3efefffc6651ef76a03cbc6b4981" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.98.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89c4f19655ab0856375e169865c91264de965bd74c407c7f1e403184b1049409" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f6ae9b71597dc5fd115d52849d7a5556ad9265885ad3492ea8d73b93bbc46e" +dependencies = [ + "aws-credential-types", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "percent-encoding", + "sha2", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cba48474f1d6807384d06fec085b909f5807e16653c5af5c45dfe89539f0b70" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + 
+[[package]] +name = "aws-smithy-http" +version = "0.63.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af4a8a5fe3e4ac7ee871237c340bbce13e982d37543b65700f4419e039f5d78e" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0709f0083aa19b704132684bc26d3c868e06bd428ccc4373b0b55c3e8748a58b" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.27", + "h2 0.4.13", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.8.1", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.36", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.62.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b3a779093e18cad88bbae08dc4261e1d95018c4c5b9356a52bcae7c0b6e9bb" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3f39d5bb871aaf461d59144557f16d5927a5248a983a40654d9cf3b9ba183b" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f76a580e3d8f8961e5d48763214025a2af65c2fa4cd1fb7f270a0e107a71b0" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd3dfc18c1ce097cf81fced7192731e63809829c6cbf933c1ec47452d08e1aa" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c55e0837e9b8526f49e0b9bfa9ee18ddee70e853f5bc09c5d11ebceddcb0fec" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.4.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "576b0d6991c9c32bc14fc340582ef148311f924d41815f641a308b5d11e8e7cd" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b53543b4b86ed43f051644f704a98c7291b3618b67adf057ee77a366fa52fcaa" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c50f3cdf47caa8d01f2be4a6663ea02418e892f9bbfd82c7b9a3a37eaccdd3a" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version 0.4.1", + "tracing", +] [[package]] name = "axum" 
@@ -233,10 +574,10 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "itoa", "matchit", @@ -264,8 +605,8 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -311,6 +652,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.8.3" @@ -348,10 +699,10 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -375,27 +726,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] - [[package]] name = "blake3" version = "1.8.3" @@ -420,24 +756,12 @@ dependencies = [ ] [[package]] -name = "brotli" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.1" +name = "build-rs" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" +checksum = "ffc87f52297187fb5d25bde3d368f0480f88ac1d8f3cf4c80ac5575435511114" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", + "unicode-ident", ] [[package]] @@ -446,12 +770,6 @@ version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" -[[package]] -name = "bytemuck" -version = "1.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" - [[package]] name = "byteorder" version = "1.5.0" @@ -460,9 +778,19 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] [[package]] name = "bzip2-sys" @@ -492,6 +820,12 @@ 
dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -501,40 +835,18 @@ dependencies = [ "nom", ] -[[package]] -name = "cf-rustracing" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f85c3824e4191621dec0551e3cef3d511f329da9a8990bf3e450a85651d97e" -dependencies = [ - "backtrace", - "rand 0.8.5", - "tokio", - "trackable", -] - -[[package]] -name = "cf-rustracing-jaeger" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a5f80d44c257c3300a7f45ada676c211e64bbbac591bbec19344a8f61fbcab" -dependencies = [ - "cf-rustracing", - "hostname", - "local-ip-address", - "percent-encoding", - "rand 0.9.2", - "thrift_codec", - "tokio", - "trackable", -] - [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -623,77 +935,38 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 1.3.2", - "clap_derive 3.2.25", - "clap_lex 0.2.4", - "indexmap 1.9.3", - "once_cell", - "strsim 0.10.0", - "termcolor", - "textwrap", -] - -[[package]] -name = "clap" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", - "clap_derive 4.5.49", + "clap_derive", ] [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.7", - "strsim 0.11.1", -] - -[[package]] -name = "clap_derive" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" -dependencies = [ - "heck 0.4.1", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", + "clap_lex", + "strsim", ] [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.114", ] -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.7.7" @@ -715,6 +988,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + 
"bytes", + "memchr", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -762,36 +1045,26 @@ dependencies = [ "libc", ] -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" -version = "0.5.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ + "alloca", "anes", "cast", "ciborium", - "clap 4.5.54", + "clap", "criterion-plot", - "is-terminal", - "itertools 0.10.5", + "itertools 0.13.0", "num-traits", - "once_cell", "oorandom", + "page_size", "plotters", "rayon", "regex", "serde", - "serde_derive", "serde_json", "tinytemplate", "walkdir", @@ -799,12 +1072,12 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.5.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", - "itertools 0.10.5", + "itertools 0.13.0", ] [[package]] @@ -826,15 +1099,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "crossbeam-queue" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -897,47 +1161,14 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "daemonize" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" -dependencies = [ - "libc", -] - -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", -] - [[package]] name = "darling" version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", -] - -[[package]] -name = "darling_core" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.114", + "darling_core", + "darling_macro", ] [[package]] @@ -950,18 +1181,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.11.1", - "syn 2.0.114", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core 0.20.11", - "quote", + "strsim", "syn 2.0.114", ] @@ -971,7 +1191,7 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core 0.21.3", + "darling_core", "quote", "syn 2.0.114", ] @@ -1015,7 +1235,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524bc3df0d57e98ecd022e21ba31166c2625e7d3e5bcc4510efaeeab4abcab04" dependencies = [ "deadpool-runtime", - "tracing", ] [[package]] @@ -1037,48 +1256,6 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "derivative" -version = 
"2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling 0.20.11", - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn 2.0.114", -] - [[package]] name = "derive_more" version = "2.1.1" @@ -1197,14 +1374,20 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" dependencies = [ - "darling 0.21.3", + "darling", "either", - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.114", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ecdsa" version = "0.16.9" @@ -1348,22 +1531,6 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" -[[package]] -name = "figment" -version = "0.10.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" -dependencies = [ - "atomic", - "parking_lot", - "pear", - "serde", - "tempfile", - "toml 0.8.23", - "uncased", - "version_check", -] - [[package]] name = "find-msvc-tools" version = "0.1.8" @@ -1371,21 +1538,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] -name = "fixedbitset" -version = "0.5.7" +name = "fixed-hash" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "static_assertions", +] [[package]] -name = "flate2" -version = "1.1.8" +name = "fixedbitset" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" -dependencies = [ - "crc32fast", - "libz-ng-sys", - "miniz_oxide", -] +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flume" @@ -1417,21 +1582,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.2" @@ -1450,6 +1600,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "fs_extra" +version = "1.3.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.31" @@ -1599,15 +1755,18 @@ dependencies = [ ] [[package]] -name = "getset" -version = "0.1.6" +name = "getrandom" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" dependencies = [ - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.114", + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasip3", + "wasm-bindgen", ] [[package]] @@ -1633,6 +1792,25 @@ dependencies = [ "subtle", ] +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.13" @@ -1644,8 +1822,8 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http", - "indexmap 2.13.0", + "http 1.4.0", + "indexmap", "slab", "tokio", "tokio-util", @@ -1658,16 +1836,10 @@ version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ - "cfg-if", - "crunchy", - "zerocopy", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + "cfg-if", + "crunchy", + "zerocopy", +] [[package]] name = "hashbrown" @@ -1675,8 +1847,6 @@ version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "allocator-api2", - "equivalent", "foldhash 0.1.5", ] @@ -1694,27 +1864,12 @@ dependencies = [ "serde_core", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.5.2" @@ -1746,14 +1901,14 @@ dependencies = [ ] [[package]] -name = "hostname" -version = "0.4.2" +name = "http" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "cfg-if", - "libc", - "windows-link", + "bytes", + "fnv", + "itoa", ] [[package]] @@ -1766,6 +1921,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -1773,7 +1939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http", + "http 1.4.0", ] [[package]] @@ -1784,8 +1950,8 @@ checksum = 
"b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -1807,6 +1973,30 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.8.1" @@ -1817,9 +2007,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -1830,19 +2020,35 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http", - "hyper", + "http 1.4.0", + "hyper 1.8.1", "hyper-util", - "rustls", + "rustls 0.23.36", + "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower-service", ] @@ -1852,29 +2058,13 @@ version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper", + "hyper 1.8.1", "hyper-util", "pin-project-lite", "tokio", "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.19" @@ -1886,14 +2076,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "http", - "http-body", - "hyper", + "http 1.4.0", + "http-body 1.0.1", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.5.10", "system-configuration", "tokio", "tower-service", @@ -2006,6 +2196,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -2039,16 +2235,6 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.13.0" @@ -2057,14 +2243,10 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", + "serde", + "serde_core", ] -[[package]] -name = "inlinable_string" -version = "0.1.15" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" - [[package]] name = "inout" version = "0.1.4" @@ -2090,17 +2272,6 @@ dependencies = [ "serde", ] -[[package]] -name = "is-terminal" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" -dependencies = [ - "hermit-abi 0.5.2", - "libc", - "windows-sys 0.61.2", -] - [[package]] name = "is_ci" version = "1.2.0" @@ -2115,9 +2286,9 @@ checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" -version = "0.10.5" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -2161,6 +2332,28 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -2197,9 +2390,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -2240,6 +2433,12 
@@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" version = "0.2.180" @@ -2258,9 +2457,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "librocksdb-sys" @@ -2287,16 +2486,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-ng-sys" -version = "1.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf914b7dd154ca9193afec311d8e39345c1bd93b48b3faa77329f0db8f553c0" -dependencies = [ - "cmake", - "libc", -] - [[package]] name = "libz-sys" version = "1.1.23" @@ -2308,12 +2497,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -2332,17 +2515,6 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" -[[package]] -name = "local-ip-address" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" -dependencies = [ - "libc", - "neli", - "windows-sys 0.61.2", -] - [[package]] name = "lock_api" version = "0.4.14" @@ -2407,18 +2579,15 @@ dependencies = [ [[package]] name = "lru" 
-version = "0.14.0" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8cc7106155f10bdf99a6f379688f543ad6596a415375b36a59a054ceda1198" -dependencies = [ - "hashbrown 0.15.5", -] +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" [[package]] -name = "lru" -version = "0.16.3" +name = "lru-slab" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "lz4-sys" @@ -2457,37 +2626,31 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "miden-agglayer" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a867217bab689c0539f6b4797cb452f0932de6904479a38f1322e045b9383b" +checksum = "e492a6044cf8875a64d7eec130d260f2eda1c783795261f00d5d52837ed027bd" dependencies = [ "fs-err", "miden-assembly", "miden-core", "miden-core-lib", + "miden-crypto", "miden-protocol", "miden-standards", "miden-utils-sync", + "primitive-types", "regex", + "thiserror 2.0.18", "walkdir", ] [[package]] name = "miden-air" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" +checksum = "5cca9632323bd4e32ae5b21b101ed417a646f5d72196b1bf3f1ca889a148322a" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2498,9 +2661,9 @@ dependencies = [ [[package]] name = 
"miden-assembly" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" +checksum = "2395b2917aea613a285d3425d1ca07e6c45442e2b34febdea2081db555df62fc" dependencies = [ "env_logger", "log", @@ -2513,9 +2676,9 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" +checksum = "1f9bed037d137f209b9e7b28811ec78c0536b3f9259d6f4ceb5823c87513b346" dependencies = [ "aho-corasick", "env_logger", @@ -2537,9 +2700,9 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e92a0ddae8d0983e37bc636edba741947b1e3dc63baed2ad85921342080154a" +checksum = "b9564dfb23c529aad68369845b6897a6f62bacdeab7c00db432a5f16670764d4" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2547,9 +2710,9 @@ dependencies = [ [[package]] name = "miden-core" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" +checksum = "8714aa5f86c59e647b7417126b32adc4ef618f835964464f5425549df76b6d03" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2569,9 +2732,9 @@ dependencies = [ [[package]] name = "miden-core-lib" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" +checksum = "1bb16a4d39202c59a7964d3585cd5af21a46a759ff6452cb5f20723ed5af4362" dependencies = [ "env_logger", "fs-err", @@ -2586,9 +2749,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.4" +version = "0.19.8" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" +checksum = "be59336a868de7c379eace9450563c2d7f4a0b7ab936835ec5a340dcd8d9a5ed" dependencies = [ "blake3", "cc", @@ -2603,12 +2766,11 @@ dependencies = [ "miden-crypto-derive", "num", "num-complex", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_core 0.9.5", "rand_hc", "rayon", - "rocksdb", "sha2", "sha3", "subtle", @@ -2631,9 +2793,9 @@ dependencies = [ [[package]] name = "miden-debug-types" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" +checksum = "cd1494f102ad5b9fa43e391d2601186dc601f41ab7dcd8a23ecca9bf3ef930f4" dependencies = [ "memchr", "miden-crypto", @@ -2643,7 +2805,7 @@ dependencies = [ "miden-utils-sync", "paste", "serde", - "serde_spanned 1.0.4", + "serde_spanned", "thiserror 2.0.18", ] @@ -2656,11 +2818,22 @@ dependencies = [ "unicode-width 0.1.14", ] +[[package]] +name = "miden-large-smt-backend-rocksdb" +version = "0.14.0-alpha.1" +dependencies = [ + "miden-crypto", + "miden-protocol", + "rayon", + "rocksdb", + "winter-utils", +] + [[package]] name = "miden-mast-package" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" +checksum = "692185bfbe0ecdb28bf623f1f8c88282cd6727ba081a28e23b301bdde1b45be4" dependencies = [ "derive_more", "miden-assembly-syntax", @@ -2712,21 +2885,22 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "axum", - "clap 4.5.54", + "clap", "hex", "humantime", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", "miden-testing", "miden-tx", - "rand 0.9.2", - 
"rand_chacha 0.9.0", + "rand", + "rand_chacha", "reqwest", "serde", "serde_json", @@ -2740,16 +2914,16 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", - "clap 4.5.54", - "figment", + "clap", "fs-err", "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-rpc", "miden-node-store", "miden-node-utils", @@ -2761,26 +2935,26 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", "futures", "itertools 0.14.0", - "miden-block-prover", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-test-macro", "miden-node-utils", + "miden-node-validator", "miden-protocol", "miden-remote-prover-client", "miden-standards", "miden-tx", "miden-tx-batch-prover", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rstest", "serial_test", "tempfile", @@ -2795,9 +2969,22 @@ dependencies = [ "winterfell", ] +[[package]] +name = "miden-node-db" +version = "0.14.0-alpha.1" +dependencies = [ + "deadpool", + "deadpool-diesel", + "deadpool-sync", + "diesel", + "miden-protocol", + "thiserror 2.0.18", + "tracing", +] + [[package]] name = "miden-node-grpc-error-macro" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "quote", "syn 2.0.114", @@ -2805,12 +2992,15 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", + "build-rs", + "diesel", + "diesel_migrations", "futures", - "indexmap 2.13.0", "libsqlite3-sys", + "miden-node-db", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2818,7 +3008,9 @@ dependencies = [ "miden-remote-prover-client", "miden-standards", "miden-tx", + "rand_chacha", "rstest", + "tempfile", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -2830,15 
+3022,17 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", + "build-rs", "fs-err", "hex", - "http", + "http 1.4.0", "miden-node-grpc-error-macro", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -2854,21 +3048,26 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ + "build-rs", "fs-err", "miette", "protox", "tonic-prost-build", ] +[[package]] +name = "miden-node-rocksdb-cxx-linkage-fix" +version = "0.14.0-alpha.1" + [[package]] name = "miden-node-rpc" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "futures", - "http", + "http 1.4.0", "mediatype", "miden-air", "miden-node-proto", @@ -2896,59 +3095,68 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "assert_matches", + "build-rs", "criterion", "deadpool", "deadpool-diesel", - "deadpool-sync", "diesel", "diesel_migrations", "fs-err", + "futures", "hex", - "indexmap 2.13.0", + "indexmap", "libsqlite3-sys", + "miden-agglayer", + "miden-block-prover", "miden-crypto", + "miden-large-smt-backend-rocksdb", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-test-macro", "miden-node-utils", "miden-protocol", + "miden-remote-prover-client", "miden-standards", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "regex", "serde", + "tempfile", "termtree", "thiserror 2.0.18", "tokio", "tokio-stream", - "toml 0.9.11+spec-1.1.0", + "toml 1.0.3+spec-1.1.0", "tonic", "tonic-reflection", "tower-http", "tracing", + "url", ] [[package]] name = "miden-node-stress-test" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ - "clap 4.5.54", + "clap", "fs-err", 
"futures", "miden-air", - "miden-block-prover", "miden-node-block-producer", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-store", "miden-node-utils", "miden-protocol", "miden-standards", - "rand 0.9.2", + "rand", "rayon", "tokio", "tonic", @@ -2965,21 +3173,20 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", "bytes", - "figment", - "http", + "http 1.4.0", "http-body-util", "itertools 0.14.0", - "lru 0.16.3", + "lru", + "miden-node-rocksdb-cxx-linkage-fix", "miden-protocol", "opentelemetry", "opentelemetry-otlp", "opentelemetry_sdk", - "rand 0.9.2", - "serde", + "rand", "thiserror 2.0.18", "tokio", "tonic", @@ -2993,9 +3200,15 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", + "aws-config", + "aws-sdk-kms", + "build-rs", + "diesel", + "diesel_migrations", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", @@ -3012,9 +3225,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" +checksum = "0e09f7916b1e7505f74a50985a185fdea4c0ceb8f854a34c90db28e3f7da7ab6" dependencies = [ "itertools 0.14.0", "miden-air", @@ -3032,9 +3245,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "785be319a826c9cb43d2e1a41a1fb1eee3f2baafe360e0d743690641f7c93ad5" +checksum = "8a88effeac994eb55b8dc4f93fbfd71a5d916dfaba1099896e27a0ee42c488c1" dependencies = [ "bech32", "fs-err", @@ -3049,8 +3262,8 @@ dependencies = [ "miden-protocol-macros", "miden-utils-sync", "miden-verifier", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", 
"rand_xoshiro", "regex", "semver 1.0.27", @@ -3063,9 +3276,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dc854c1b9e49e82d3f39c5710345226e0b2a62ec0ea220c616f1f3a099cfb3" +checksum = "4bb28b730005e5f8b08d615ea9216f8cab77b3a7439fa54d5e39d2ec43ef53a3" dependencies = [ "proc-macro2", "quote", @@ -3074,9 +3287,9 @@ dependencies = [ [[package]] name = "miden-prover" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" +checksum = "d45e30526be72b8af0fd1d8b24c9cba8ac1187ca335dcee38b8e5e20234e7698" dependencies = [ "miden-air", "miden-debug-types", @@ -3088,18 +3301,20 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ "anyhow", + "assert_matches", "async-trait", - "axum", - "bytes", - "clap 4.5.54", - "http", + "build-rs", + "clap", + "fs-err", + "http 1.4.0", "humantime", "miden-block-prover", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -3108,36 +3323,27 @@ dependencies = [ "miden-tx-batch-prover", "miette", "opentelemetry", - "pingora", - "pingora-core", - "pingora-limits", - "pingora-proxy", - "prometheus 0.14.0", "prost", - "reqwest", - "semver 1.0.27", - "serde", - "serde_qs", - "thiserror 2.0.18", + "serial_test", "tokio", "tokio-stream", "tonic", "tonic-health", "tonic-prost", "tonic-prost-build", + "tonic-reflection", "tonic-web", "tower-http", "tracing", - "tracing-opentelemetry", - "uuid", ] [[package]] name = "miden-remote-prover-client" -version = "0.13.7" +version = "0.14.0-alpha.1" dependencies = [ + "build-rs", "fs-err", - "getrandom 0.3.4", + "getrandom 0.4.1", "miden-node-proto-build", "miden-protocol", 
"miden-tx", @@ -3154,9 +3360,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e33771fc35e1e640582bcd26c88b2ab449dd3a70888b315546d0d3447f4bb3" +checksum = "2cef036bbfec29acba92751a13d05844bbcf080140201097b419c9ad1927e367" dependencies = [ "fs-err", "miden-assembly", @@ -3164,7 +3370,7 @@ dependencies = [ "miden-core-lib", "miden-processor", "miden-protocol", - "rand 0.9.2", + "rand", "regex", "thiserror 2.0.18", "walkdir", @@ -3172,9 +3378,9 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5d41a888d1a5e520a9312a170975d0fbadefb1b9200543cebdf54dd0960310" +checksum = "e980777d0f7e6069942b14d4e7cb3d4d137b323ddfa15722a3bd21e9d13fdd2e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3182,21 +3388,23 @@ dependencies = [ "miden-assembly", "miden-block-prover", "miden-core-lib", + "miden-crypto", "miden-processor", "miden-protocol", "miden-standards", "miden-tx", "miden-tx-batch-prover", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", + "thiserror 2.0.18", "winterfell", ] [[package]] name = "miden-tx" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "430e4ee02b5efb71b104926e229441e0071a93a259a70740bf8c436495caa64f" +checksum = "c67e0df9adcf29c9111df65acf408ae05952b8bc6569f571963676f97668d83f" dependencies = [ "miden-processor", "miden-protocol", @@ -3208,9 +3416,9 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.3" +version = "0.14.0-alpha.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03bc209b6487ebac0de230461e229a99d17ed73596c7d99fc59eea47a28a89cc" +checksum = "ba29f8f6ecae671eff8b52b4c19eca8db5964c0b45b5d68c3ce38a57a8367931" dependencies = [ 
"miden-protocol", "miden-tx", @@ -3218,9 +3426,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" +checksum = "a1b1d490e6d7b509622d3c2cc69ffd66ad48bf953dc614579b568fe956ce0a6c" dependencies = [ "proc-macro2", "quote", @@ -3229,9 +3437,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" +checksum = "52658f6dc091c1c78e8b35ee3e7ff3dad53051971a3c514e461f581333758fe7" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3242,18 +3450,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" +checksum = "eeff7bcb7875b222424bdfb657a7cf21a55e036aa7558ebe1f5d2e413b440d0d" dependencies = [ "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" +checksum = "41d53d1ab5b275d8052ad9c4121071cb184bc276ee74354b0d8a2075e5c1d1f0" dependencies = [ "lock_api", "loom", @@ -3262,9 +3470,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" +checksum = "b13816663794beb15c8a4721c15252eb21f3b3233525684f60c7888837a98ff4" dependencies = [ "miden-air", "miden-core", @@ -3354,7 +3562,6 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", - "simd-adler32", ] [[package]] @@ -3384,68 +3591,10 @@ dependencies = [ ] [[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe 0.1.6", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "neli" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" -dependencies = [ - "bitflags 2.10.0", - "byteorder", - "derive_builder", - "getset", - "libc", - "log", - "neli-proc-macros", - "parking_lot", -] - -[[package]] -name = "neli-proc-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" -dependencies = [ - "either", - "proc-macro2", - "quote", - "serde", - "syn 2.0.114", -] - -[[package]] -name = "new_debug_unreachable" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" - -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset", -] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = "nom" @@ -3501,9 +3650,9 @@ dependencies = [ 
[[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-derive" @@ -3563,7 +3712,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.5.2", + "hermit-abi", "libc", ] @@ -3600,56 +3749,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags 2.10.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" -[[package]] -name = "openssl-sys" -version = "0.9.111" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - 
"pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.31.0" @@ -3670,7 +3775,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "http", + "http 1.4.0", "opentelemetry", "opentelemetry-proto", "opentelemetry_sdk", @@ -3704,17 +3809,17 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.2", + "rand", "thiserror 2.0.18", "tokio", "tokio-stream", ] [[package]] -name = "os_str_bytes" -version = "6.6.1" +name = "outref" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" [[package]] name = "owo-colors" @@ -3722,6 +3827,16 @@ version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +[[package]] +name = "page_size" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "parking_lot" version = "0.12.5" @@ -3751,29 +3866,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pear" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.114", -] - [[package]] name = "percent-encoding" version = "2.3.2" @@ -3787,7 +3879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.13.0", + "indexmap", ] [[package]] @@ -3831,251 +3923,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pingora" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a1f02a6347e81953ab831fdcf090a028db12d67ec3badf47831d1299dac6e20" -dependencies = [ - "pingora-core", - "pingora-http", - "pingora-load-balancing", - "pingora-proxy", - "pingora-timeout", -] - -[[package]] -name = "pingora-cache" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" -dependencies = [ - "ahash", - "async-trait", - "blake2", - "bytes", - "cf-rustracing", - "cf-rustracing-jaeger", - "hex", - "http", - "httparse", - "httpdate", - "indexmap 1.9.3", - "log", - "lru 0.14.0", - "once_cell", - "parking_lot", - "pingora-core", - "pingora-error", - "pingora-header-serde", - "pingora-http", - "pingora-lru", - "pingora-timeout", - "rand 0.8.5", - "regex", - "rmp", - "rmp-serde", - "serde", - "strum", - "tokio", -] - -[[package]] -name = "pingora-core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" -dependencies = [ - "ahash", - "async-trait", - "brotli", - "bytes", - "chrono", - "clap 3.2.25", - "daemonize", - "derivative", - "flate2", - "futures", - "h2", - "http", - 
"httparse", - "httpdate", - "libc", - "log", - "nix", - "once_cell", - "openssl-probe 0.1.6", - "parking_lot", - "percent-encoding", - "pingora-error", - "pingora-http", - "pingora-pool", - "pingora-runtime", - "pingora-timeout", - "prometheus 0.13.4", - "rand 0.8.5", - "regex", - "serde", - "serde_yaml", - "sfv", - "socket2", - "strum", - "strum_macros", - "tokio", - "tokio-test", - "unicase", - "windows-sys 0.59.0", - "zstd", -] - -[[package]] -name = "pingora-error" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52119570d3f4644e09654ad24df2b7d851bf12eaa8c4148b4674c7f90916598e" - -[[package]] -name = "pingora-header-serde" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "252a16def05c7adbbdda776e87b2be36e9481c8a77249207a2f3b563e8933b35" -dependencies = [ - "bytes", - "http", - "httparse", - "pingora-error", - "pingora-http", - "thread_local", - "zstd", - "zstd-safe", -] - -[[package]] -name = "pingora-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3542fd0fd0a83212882c5066ae739ba51804f20d624ff7e12ec85113c5c89a" -dependencies = [ - "bytes", - "http", - "pingora-error", -] - -[[package]] -name = "pingora-ketama" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5dd8546b1874d5cfca594375c1cfb852c3dffd4f060428fa031a6e790dea18" -dependencies = [ - "crc32fast", -] - -[[package]] -name = "pingora-limits" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" -dependencies = [ - "ahash", -] - -[[package]] -name = "pingora-load-balancing" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5bb0314830a64b73b50b3782f3089f87947b61b4324c804d6f8d4ff9ce1c70" -dependencies = [ - "arc-swap", - "async-trait", - "derivative", 
- "fnv", - "futures", - "http", - "log", - "pingora-core", - "pingora-error", - "pingora-http", - "pingora-ketama", - "pingora-runtime", - "rand 0.8.5", - "tokio", -] - -[[package]] -name = "pingora-lru" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" -dependencies = [ - "arrayvec", - "hashbrown 0.16.1", - "parking_lot", - "rand 0.8.5", -] - -[[package]] -name = "pingora-pool" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "996c574f30a6e1ad10b47ac1626a86e0e47d5075953dd049d60df16ba5f7076e" -dependencies = [ - "crossbeam-queue", - "log", - "lru 0.14.0", - "parking_lot", - "pingora-timeout", - "thread_local", - "tokio", -] - -[[package]] -name = "pingora-proxy" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4097fd2639905bf5b81f3618551cd826d5e03aac063e17fd7a4137f19c1a5b" -dependencies = [ - "async-trait", - "bytes", - "clap 3.2.25", - "futures", - "h2", - "http", - "log", - "once_cell", - "pingora-cache", - "pingora-core", - "pingora-error", - "pingora-http", - "rand 0.8.5", - "regex", - "tokio", -] - -[[package]] -name = "pingora-runtime" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccc165021cf55a39b9e760121b22c4260b17a0b2c530d5b93092fc5bc765b94" -dependencies = [ - "once_cell", - "rand 0.8.5", - "thread_local", - "tokio", -] - -[[package]] -name = "pingora-timeout" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548cd21d41611c725827677937e68f2cd008bbfa09f3416d3fbad07e1e42f6d7" -dependencies = [ - "once_cell", - "parking_lot", - "pin-project-lite", - "thread_local", - "tokio", -] - [[package]] name = "pkcs8" version = "0.10.2" @@ -4174,133 +4021,54 @@ dependencies = [ name = "precomputed-hash" version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" - -[[package]] -name = "pretty_assertions" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" -dependencies = [ - "diff", - "yansi", -] - -[[package]] -name = "prettyplease" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" -dependencies = [ - "proc-macro2", - "syn 2.0.114", -] - -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" -dependencies = [ - "proc-macro2", - "quote", -] +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] -name = "proc-macro-error2" -version = "2.0.1" +name = "pretty_assertions" +version = "1.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", - "syn 2.0.114", + "diff", + "yansi", ] [[package]] -name = "proc-macro2" -version = "1.0.106" +name = "prettyplease" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ - "unicode-ident", + "proc-macro2", + "syn 2.0.114", ] [[package]] -name = "proc-macro2-diagnostics" -version = "0.10.1" +name = "primitive-types" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +checksum = "721a1da530b5a2633218dc9f75713394c983c352be88d2d7c9ee85e2c4c21794" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", - "version_check", - "yansi", + "fixed-hash", + "uint", ] [[package]] -name = "prometheus" -version = "0.13.4" +name = "proc-macro-crate" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "cfg-if", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf 2.28.0", - "thiserror 1.0.69", + "toml_edit", ] [[package]] -name = "prometheus" -version = "0.14.0" +name = "proc-macro2" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ - "cfg-if", - "fnv", - 
"lazy_static", - "memchr", - "parking_lot", - "protobuf 3.7.2", - "thiserror 2.0.18", + "unicode-ident", ] [[package]] @@ -4311,10 +4079,10 @@ checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.10.0", + "bitflags", "num-traits", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax", "rusty-fork", @@ -4349,8 +4117,8 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "heck 0.5.0", - "itertools 0.14.0", + "heck", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -4372,7 +4140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.114", @@ -4399,32 +4167,6 @@ dependencies = [ "prost", ] -[[package]] -name = "protobuf" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" - -[[package]] -name = "protobuf" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" -dependencies = [ - "once_cell", - "protobuf-support", - "thiserror 1.0.69", -] - -[[package]] -name = "protobuf-support" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" -dependencies = [ - "thiserror 1.0.69", -] - [[package]] name = "protox" version = "0.9.1" @@ -4458,7 +4200,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.10.0", + "bitflags", "memchr", "unicase", ] @@ -4479,49 +4221,84 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "quote" -version = "1.0.44" +name = "quinn" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ - "proc-macro2", + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.36", + "socket2 0.5.10", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", ] [[package]] -name = "r-efi" -version = "5.3.0" +name = "quinn-proto" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls 0.23.36", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] [[package]] -name = "rand" -version = "0.8.5" +name = "quinn-udp" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ + "cfg_aliases", "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.60.2", ] [[package]] -name = "rand" -version = "0.9.2" +name = "quote" +version = "1.0.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.5", + "proc-macro2", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", + "rand_chacha", + "rand_core 0.9.5", ] [[package]] @@ -4605,7 +4382,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags", ] [[package]] @@ -4631,6 +4408,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" + [[package]] name = "regex-syntax" version = "0.8.8" @@ -4645,35 +4428,36 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.28" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64", "bytes", "encoding_rs", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + 
"http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", - "hyper-tls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls 0.23.36", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", + "tokio-rustls 0.26.4", "tower", "tower-http", "tower-service", @@ -4707,25 +4491,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rmp" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" -dependencies = [ - "num-traits", -] - -[[package]] -name = "rmp-serde" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" -dependencies = [ - "rmp", - "serde", -] - [[package]] name = "rocksdb" version = "0.24.0" @@ -4775,16 +4540,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "rust_decimal" -version = "1.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" -dependencies = [ - "arrayvec", - "num-traits", -] - [[package]] name = "rustc-demangle" version = "0.1.27" @@ -4821,11 +4576,11 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4834,24 +4589,37 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", 
"libc", "linux-raw-sys 0.11.0", "windows-sys 0.61.2", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.9", "subtle", "zeroize", ] @@ -4862,10 +4630,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -4874,15 +4642,54 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.36", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki 0.103.9", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + 
+[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4951,6 +4758,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sdd" version = "3.0.10" @@ -4971,26 +4788,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.10.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - [[package]] name = "security-framework" version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -5086,26 +4890,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_qs" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" -dependencies = [ - "percent-encoding", - "serde", - 
"thiserror 2.0.18", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - [[package]] name = "serde_spanned" version = "1.0.4" @@ -5127,18 +4911,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap 1.9.3", - "ryu", - "serde", - "yaml-rust", -] - [[package]] name = "serial_test" version = "3.3.1" @@ -5165,17 +4937,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "sfv" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" -dependencies = [ - "base64", - "indexmap 2.13.0", - "rust_decimal", -] - [[package]] name = "sha2" version = "0.10.9" @@ -5232,17 +4993,11 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" @@ -5262,6 +5017,16 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.2" @@ -5309,6 +5074,12 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "string_cache" version = "0.8.9" @@ -5327,42 +5098,14 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a8f8038e7e7969abb3f1b7c2a811225e9296da208539e0f79c5251d6cac0025" dependencies = [ - "vte", -] - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "strum" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" -dependencies = [ - "strum_macros", + "vte", ] [[package]] -name = "strum_macros" -version = "0.26.4" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" -dependencies = [ - "heck 0.5.0", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.114", -] +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" @@ -5439,7 +5182,7 @@ version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -5513,9 +5256,9 @@ dependencies = [ [[package]] name = "termtree" -version = "0.5.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +checksum = "d4d1330fe7f7f872cd05165130b10602d667b205fd85be09be2814b115d4ced9" [[package]] name = "textwrap" @@ -5577,21 +5320,11 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "thrift_codec" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d957f535b242b91aa9f47bde08080f9a6fef276477e55b0079979d002759d5" -dependencies = [ - "byteorder", - "trackable", -] - [[package]] name = "time" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -5604,15 +5337,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -5638,6 +5371,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version 
= "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.49.0" @@ -5650,7 +5398,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -5667,12 +5415,12 @@ dependencies = [ ] [[package]] -name = "tokio-native-tls" -version = "0.3.1" +name = "tokio-rustls" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "native-tls", + "rustls 0.21.12", "tokio", ] @@ -5682,7 +5430,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls", + "rustls 0.23.36", "tokio", ] @@ -5698,17 +5446,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "tokio-test" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6d24790a10a7af737693a3e8f1d03faef7e6ca0cc99aae5066f533766de545" -dependencies = [ - "futures-core", - "tokio", - "tokio-stream", -] - [[package]] name = "tokio-util" version = "0.7.18" @@ -5722,27 +5459,15 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned 0.6.9", - "toml_datetime 0.6.11", - 
"toml_edit 0.22.27", -] - [[package]] name = "toml" version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap 2.13.0", + "indexmap", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", @@ -5750,12 +5475,18 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.11" +name = "toml" +version = "1.0.3+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +checksum = "c7614eaf19ad818347db24addfa201729cf2a9b6fdfd9eb0ab870fcacc606c0c" dependencies = [ - "serde", + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime 1.0.0+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -5768,17 +5499,12 @@ dependencies = [ ] [[package]] -name = "toml_edit" -version = "0.22.27" +name = "toml_datetime" +version = "1.0.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" dependencies = [ - "indexmap 2.13.0", - "serde", - "serde_spanned 0.6.9", - "toml_datetime 0.6.11", - "toml_write", - "winnow", + "serde_core", ] [[package]] @@ -5787,7 +5513,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.13.0", + "indexmap", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -5795,19 +5521,13 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - [[package]] name = "toml_writer" version = "1.0.6+spec-1.1.0" @@ -5824,20 +5544,20 @@ dependencies = [ "axum", "base64", "bytes", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "rustls-native-certs", - "socket2", + "socket2 0.6.2", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tokio-stream", "tower", "tower-layer", @@ -5919,8 +5639,8 @@ checksum = "75214f6b6bd28c19aa752ac09fdf0eea546095670906c21fe3940e180a4c43f2" dependencies = [ "base64", "bytes", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "pin-project", "tokio-stream", "tonic", @@ -5931,16 +5651,16 @@ dependencies = [ [[package]] name = "tonic-web-wasm-client" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898cd44be5e23e59d2956056538f1d6b3c5336629d384ffd2d92e76f87fb98ff" +checksum = "e8e21e20b94f808d6f2244a5d960d02c28dd82066abddd2f27019bac0535f310" dependencies = [ "base64", "byteorder", "bytes", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "httparse", "js-sys", @@ -5962,7 +5682,7 @@ checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.13.0", + "indexmap", "pin-project-lite", "slab", "sync_wrapper", @@ -5979,11 +5699,11 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.10.0", + "bitflags", "bytes", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "iri-string", "pin-project-lite", @@ -6040,9 +5760,9 @@ dependencies = [ [[package]] name = "tracing-forest" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" +checksum = "f09cb459317a3811f76644334473239d696cd8efc606963ae7d1c308cead3b74" dependencies = [ "chrono", "smallvec", @@ -6109,25 +5829,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trackable" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" -dependencies = [ - "trackable_derive", -] - -[[package]] -name = "trackable_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -6157,19 +5858,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] -name = "unarray" -version = "0.1.4" +name = "uint" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] [[package]] -name = "uncased" -version = "0.9.10" +name = "unarray" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" -dependencies = [ - "version_check", -] +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" @@ -6236,6 +5940,12 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -6250,11 +5960,10 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ - "getrandom 0.3.4", "js-sys", "wasm-bindgen", ] @@ -6277,6 +5986,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "vte" version = "0.14.1" @@ -6329,6 +6044,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.108" @@ -6388,11 +6112,33 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + [[package]] name = "wasm-streams" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" dependencies = [ "futures-util", "js-sys", @@ -6401,6 +6147,18 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver 1.0.27", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -6421,6 +6179,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -6524,27 +6291,27 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.42.2", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.48.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] @@ -6567,6 +6334,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -6615,6 +6397,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -6633,6 +6421,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = 
"0.48.5" @@ -6651,6 +6445,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -6681,6 +6481,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -6699,6 +6505,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -6717,6 +6529,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -6735,6 +6553,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -6838,7 +6662,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4ff3b651754a7bd216f959764d0a5ab6f4b551c9a3a08fb9ccecbed594b614a" dependencies = [ - "rand 0.9.2", + "rand", "winter-utils", ] @@ -6880,6 +6704,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.114", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.114", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver 1.0.27", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" @@ -6898,13 +6804,10 @@ dependencies = [ ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "xmlparser" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" [[package]] name = "yansi" @@ -6937,18 +6840,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", @@ -7017,34 +6920,6 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" - -[[package]] -name = "zstd" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.16+zstd.1.5.7" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" -dependencies = [ - "cc", - "pkg-config", -] +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" diff --git a/Cargo.toml b/Cargo.toml index a1a9387756..9da92a6db6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,10 +5,13 @@ members = [ "bin/remote-prover", "bin/stress-test", "crates/block-producer", + "crates/db", "crates/grpc-error-macro", + "crates/large-smt-backend-rocksdb", "crates/ntx-builder", "crates/proto", "crates/remote-prover-client", + "crates/rocksdb-cxx-linkage-fix", "crates/rpc", "crates/store", "crates/test-macro", @@ -26,48 +29,64 @@ exclude = [".github/"] homepage = "https://miden.xyz" license = "MIT" readme = "README.md" -repository = "https://github.com/0xMiden/miden-node" -rust-version = "1.90" -version = "0.13.7" +repository = "https://github.com/0xMiden/node" +rust-version = "1.91" +version = "0.14.0-alpha.1" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] opt-level = 2 +# Avoid running the expensive debug assertion in winter-prover +[profile.test.package.winter-prover] +debug-assertions = false + [profile.release] debug = true [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } -miden-node-proto = { path = "crates/proto", version = "0.13" } -miden-node-proto-build = { path = "proto", version = "0.13" } -miden-node-rpc = { path = "crates/rpc", version = "0.13" } -miden-node-store = { path = "crates/store", version = "0.13" } -miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.13" } -miden-node-validator = { path = "crates/validator", version = "0.13" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } +miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "=0.14.0-alpha.1" } +miden-node-block-producer = { path = "crates/block-producer", version = "=0.14.0-alpha.1" } +miden-node-db = { path = "crates/db", version = "=0.14.0-alpha.1" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "=0.14.0-alpha.1" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "=0.14.0-alpha.1" } +miden-node-proto = { path = "crates/proto", version = "=0.14.0-alpha.1" } +miden-node-proto-build = { path = "proto", version = "=0.14.0-alpha.1" } +miden-node-rpc = { path = "crates/rpc", version = "=0.14.0-alpha.1" } +miden-node-store = { path = "crates/store", version = "=0.14.0-alpha.1" } +miden-node-test-macro = { path = "crates/test-macro" } +miden-node-utils = { path = "crates/utils", version = "=0.14.0-alpha.1" } +miden-node-validator = { path = "crates/validator", version = "=0.14.0-alpha.1" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "=0.14.0-alpha.1" } +# Temporary workaround until +# is part of `rocksdb-rust` release +miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = 
"=0.14.0-alpha.1" } # miden-base aka protocol dependencies. These should be updated in sync. -miden-block-prover = { version = "0.13" } -miden-protocol = { default-features = false, version = "0.13" } -miden-standards = { version = "0.13" } -miden-testing = { version = "0.13" } -miden-tx = { default-features = false, version = "0.13" } -miden-tx-batch-prover = { version = "0.13" } +miden-block-prover = { version = "=0.14.0-alpha.1" } +miden-protocol = { default-features = false, version = "=0.14.0-alpha.1" } +miden-standards = { version = "=0.14.0-alpha.1" } +miden-testing = { version = "=0.14.0-alpha.1" } +miden-tx = { default-features = false, version = "=0.14.0-alpha.1" } +miden-tx-batch-prover = { version = "=0.14.0-alpha.1" } # Other miden dependencies. These should align with those expected by miden-base. -miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { default-features = false, version = "0.19" } +miden-air = { features = ["std", "testing"], version = "0.20" } + +miden-crypto = { version = "0.19.7" } # External dependencies anyhow = { version = "1.0" } assert_matches = { version = "1.5" } async-trait = { version = "0.1" } +build-rs = { version = "0.3" } clap = { features = ["derive"], version = "4.5" } +deadpool = { default-features = false, version = "0.12" } +deadpool-diesel = { version = "0.6" } +deadpool-sync = { default-features = false, version = "0.1" } +diesel = { version = "2.3" } +diesel_migrations = { version = "2.3" } fs-err = { version = "3" } futures = { version = "0.3" } hex = { version = "0.4" } @@ -84,13 +103,16 @@ pretty_assertions = { version = "1.4" } prost = { default-features = false, version = "=0.14.3" } protox = { version = "=0.9.1" } rand = { version = "0.9" } -rand_chacha = { version = "0.9" } +rand_chacha = { default-features = false, version = "0.9" } +reqwest = { version = "0.13" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3" } 
thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } -toml = { version = "0.9" } +tokio-util = { version = "0.7" } +toml = "1.0" tonic = { default-features = false, version = "0.14" } tonic-health = { version = "0.14" } tonic-prost = { version = "0.14" } @@ -107,6 +129,7 @@ url = { features = ["serde"], version = "2.5" } # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. pedantic = { level = "warn", priority = -1 } +allow_attributes = "deny" cast_possible_truncation = "allow" # Overly many instances especially regarding indices. collapsible-if = "allow" # Too new to enforce. from_iter_instead_of_collect = "allow" # at times `FromIter` is much more readable @@ -119,3 +142,7 @@ must_use_candidate = "allow" # This marks many fn's which isn't helpfu needless_for_each = "allow" # Context dependent if that's useful. should_panic_without_expect = "allow" # We don't care about the specific panic message. # End of pedantic lints. + +# Configure `cargo-typos` +[workspace.metadata.typos] +files.extend-exclude = ["*.svg"] # Ignore SVG files. 
diff --git a/Makefile b/Makefile index 64aa55bf4f..33ab72a885 100644 --- a/Makefile +++ b/Makefile @@ -7,8 +7,8 @@ help: # -- variables ------------------------------------------------------------------------------------ WARNINGS=RUSTDOCFLAGS="-D warnings" -BUILD_PROTO=BUILD_PROTO=1 CONTAINER_RUNTIME ?= docker +STRESS_TEST_DATA_DIR ?= stress-test-store-$(shell date +%Y%m%d-%H%M%S) # -- linting -------------------------------------------------------------------------------------- @@ -85,7 +85,7 @@ test: ## Runs all tests .PHONY: check check: ## Check all targets and features for errors without code generation - ${BUILD_PROTO} cargo check --all-features --all-targets --locked --workspace + cargo check --all-features --all-targets --locked --workspace .PHONY: check-features check-features: ## Checks all feature combinations compile without warnings using cargo-hack @@ -95,18 +95,27 @@ check-features: ## Checks all feature combinations compile without warnings usin .PHONY: build build: ## Builds all crates and re-builds protobuf bindings for proto crates - ${BUILD_PROTO} cargo build --locked --workspace - ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build + cargo build --locked --workspace + cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build # --- installing ---------------------------------------------------------------------------------- .PHONY: install-node install-node: ## Installs node - ${BUILD_PROTO} cargo install --path bin/node --locked + cargo install --path bin/node --locked .PHONY: install-remote-prover install-remote-prover: ## Install remote prover's CLI - $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --features concurrent --locked + cargo install --path 
bin/remote-prover --bin miden-remote-prover --locked + +.PHONY: stress-test-smoke +stress-test: ## Runs stress-test benchmarks + cargo build --release --locked -p miden-node-stress-test + @mkdir -p $(STRESS_TEST_DATA_DIR) + ./target/release/miden-node-stress-test seed-store --data-directory $(STRESS_TEST_DATA_DIR) --num-accounts 500 --public-accounts-percentage 50 + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-state + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-notes + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 .PHONY: install-stress-test install-stress-test: ## Installs stress-test binary diff --git a/README.md b/README.md index bca1cdbf4f..a06c00e4ab 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Miden node -[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/0xMiden/miden-node/blob/main/LICENSE) +[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/0xMiden/node/blob/main/LICENSE) [![CI](https://github.com/0xMiden/node/actions/workflows/ci.yml/badge.svg)](https://github.com/0xMiden/node/actions/workflows/ci.yml) [![RUST_VERSION](https://img.shields.io/badge/rustc-1.90+-lightgray.svg)](https://www.rust-lang.org/tools/install) [![crates.io](https://img.shields.io/crates/v/miden-node)](https://crates.io/crates/miden-node) @@ -33,15 +33,17 @@ The documentation in the `docs/external` folder is built using Docusaurus and is Developer documentation and onboarding guide is available [here](https://0xMiden.github.io/miden-node/developer/index.html). 
-At minimum, please see our [contributing](CONTRIBUTING.md) guidelines and our [makefile](Makefile) for example workflows +At minimum, please see our [contributing](https://github.com/0xMiden/.github?tab=contributing-ov-file) guidelines and our [makefile](Makefile) for example workflows e.g. run the testsuite using ```sh make test ``` -Note that we do _not_ accept low-effort contributions or AI generated code. For typos and documentation errors please -rather open an issue. +In particular, please note that we do _not_ accept [low-effort contributions](https://github.com/0xMiden/.github?tab=contributing-ov-file#contribution-quality) or AI generated code. For typos and documentation errors please open an issue instead. + +> [!IMPORTANT] +> PRs will be closed unless you have been assigned an issue by a maintainer. ## License diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 11c2b19059..357169c025 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -26,9 +26,9 @@ miden-protocol = { features = ["std", "testing"], workspace = true } miden-standards = { workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["std"], workspace = true } -rand = { version = "0.9" } -rand_chacha = { version = "0.9" } -reqwest = { features = ["json"], version = "0.12" } +rand = { workspace = true } +rand_chacha = { workspace = true } +reqwest = { features = ["json", "query"], workspace = true } serde = { features = ["derive"], version = "1.0" } serde_json = { version = "1.0" } sha2 = { version = "0.10" } @@ -37,3 +37,6 @@ tonic = { features = ["codegen", "tls-native-roots", "transport"], wo tonic-health = { workspace = true } tracing = { workspace = true } url = { features = ["serde"], workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/network-monitor/build.rs b/bin/network-monitor/build.rs new file mode 100644 index 
0000000000..ed4038d06e --- /dev/null +++ b/bin/network-monitor/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c044267331..dd004ff0d0 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -21,12 +21,10 @@ use miden_protocol::note::{ Note, NoteAssets, NoteAttachment, - NoteExecutionHint, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, - NoteTag, + NoteStorage, NoteType, }; use miden_protocol::transaction::{InputNotes, PartialBlockchain, TransactionArgs}; @@ -34,7 +32,7 @@ use miden_protocol::utils::Deserializable; use miden_protocol::{Felt, Word}; use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt}; use miden_standards::code_builder::CodeBuilder; -use miden_standards::note::NetworkAccountTarget; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint}; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; use miden_tx::{LocalTransactionProver, TransactionExecutor}; @@ -43,6 +41,9 @@ use rand_chacha::ChaCha20Rng; use tokio::sync::{Mutex, watch}; use tracing::{error, info, instrument, warn}; +/// Number of consecutive increment failures before re-syncing the wallet account from the RPC. 
+const RESYNC_FAILURE_THRESHOLD: usize = 3; + use crate::COMPONENT; use crate::config::MonitorConfig; use crate::deploy::counter::COUNTER_SLOT_NAME; @@ -397,6 +398,7 @@ pub async fn run_increment_task( let mut rng = ChaCha20Rng::from_os_rng(); let mut interval = tokio::time::interval(config.counter_increment_interval); + let mut consecutive_failures: usize = 0; loop { interval.tick().await; @@ -416,6 +418,8 @@ pub async fn run_increment_task( .await { Ok((tx_id, final_account, block_height)) => { + consecutive_failures = 0; + let target_value = handle_increment_success( &mut wallet_account, &final_account, @@ -435,7 +439,21 @@ pub async fn run_increment_task( } }, Err(e) => { + consecutive_failures += 1; last_error = Some(handle_increment_failure(&mut details, &e)); + + if consecutive_failures >= RESYNC_FAILURE_THRESHOLD { + if try_resync_wallet_account( + &mut rpc_client, + &mut wallet_account, + &mut data_store, + ) + .await + .is_ok() + { + consecutive_failures = 0; + } + } }, } @@ -480,6 +498,37 @@ fn handle_increment_success( Ok(new_expected) } +/// Re-sync the wallet account from the RPC after repeated failures. +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.try_resync_wallet_account", + skip_all, + fields(account.id = %wallet_account.id()), + level = "warn", + err, +)] +async fn try_resync_wallet_account( + rpc_client: &mut RpcClient, + wallet_account: &mut Account, + data_store: &mut MonitorDataStore, +) -> Result<()> { + let fresh_account = fetch_wallet_account(rpc_client, wallet_account.id()) + .await + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "failed to re-sync wallet account from RPC"); + })? 
+ .context("wallet account not found on-chain during re-sync") + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "wallet account not found on-chain during re-sync"); + })?; + + info!(account.id = %wallet_account.id(), "wallet account re-synced from RPC"); + *wallet_account = fresh_account; + data_store.update_account(wallet_account.clone()); + Ok(()) +} + /// Handle the failure path when creating/submitting the network note fails. fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Error) -> String { error!("Failed to create and submit network note: {:?}", error); @@ -751,7 +800,7 @@ fn load_counter_account(file_path: &Path) -> Result { } /// Create and submit a network note that targets the counter account. -#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] #[instrument( parent = None, target = COMPONENT, @@ -858,12 +907,8 @@ fn create_network_note( .context("Failed to create NetworkAccountTarget for counter account")?; let attachment: NoteAttachment = target.into(); - let metadata = NoteMetadata::new( - wallet_account.id(), - NoteType::Public, - NoteTag::with_account_target(counter_account.id()), - ) - .with_attachment(attachment); + let metadata = + NoteMetadata::new(wallet_account.id(), NoteType::Public).with_attachment(attachment); let serial_num = Word::new([ Felt::new(rng.random()), @@ -872,7 +917,7 @@ fn create_network_note( Felt::new(rng.random()), ]); - let recipient = NoteRecipient::new(serial_num, script, NoteInputs::new(vec![])?); + let recipient = NoteRecipient::new(serial_num, script, NoteStorage::new(vec![])?); let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs index a5ab3d3638..e517beb063 100644 --- a/bin/network-monitor/src/deploy/counter.rs +++ b/bin/network-monitor/src/deploy/counter.rs @@ -3,6 +3,7 @@ use 
std::path::Path; use anyhow::Result; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::{ Account, AccountBuilder, @@ -53,8 +54,9 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result { let component_code = CodeBuilder::default().compile_component_code("counter::program", script)?; - let account_code = AccountComponent::new(component_code, vec![counter_slot, owner_id_slot])? - .with_supports_all_types(); + let metadata = AccountComponentMetadata::new("counter::program").with_supports_all_types(); + let account_code = + AccountComponent::new(component_code, vec![counter_slot, owner_id_slot], metadata)?; let incr_nonce_auth: AccountComponent = IncrNonceAuthComponent.into(); diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 235905f139..b89c09aa0c 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -11,7 +11,7 @@ use anyhow::{Context, Result}; use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; -use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage}; +use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage, StorageMapKey}; use miden_protocol::assembly::{ DefaultSourceManager, Library, @@ -308,7 +308,7 @@ impl DataStore for MonitorDataStore { &self, _account_id: AccountId, _map_root: Word, - _map_key: Word, + _map_key: StorageMapKey, ) -> Result { unimplemented!("Not needed") } diff --git a/bin/network-monitor/src/deploy/wallet.rs b/bin/network-monitor/src/deploy/wallet.rs index de687ab6d4..ba074a60f8 100644 --- a/bin/network-monitor/src/deploy/wallet.rs +++ b/bin/network-monitor/src/deploy/wallet.rs @@ -4,10 +4,10 @@ use std::path::Path; use anyhow::Result; use miden_node_utils::crypto::get_rpo_random_coin; -use 
miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::auth::{AuthScheme, AuthSecretKey}; use miden_protocol::account::{Account, AccountFile, AccountStorageMode, AccountType}; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; -use miden_standards::AuthScheme; +use miden_standards::AuthMethod; use miden_standards::account::wallets::create_basic_wallet; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -22,7 +22,9 @@ use crate::COMPONENT; pub fn create_wallet_account() -> Result<(Account, SecretKey)> { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let init_seed: [u8; 32] = rng.random(); let wallet_account = create_basic_wallet( diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 370d7bb105..1e50a173d9 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -47,7 +47,7 @@ pub struct FaucetTestDetails { struct PowChallengeResponse { challenge: String, target: u64, - #[allow(dead_code)] // Timestamp is part of API response but not used + #[expect(dead_code)] // Timestamp is part of API response but not used timestamp: u64, } @@ -55,7 +55,7 @@ struct PowChallengeResponse { #[derive(Debug, Deserialize)] struct GetTokensResponse { tx_id: String, - #[allow(dead_code)] // Note ID is part of API response but not used in monitoring + #[expect(dead_code)] // Note ID is part of API response but not used in monitoring note_id: String, } @@ -189,14 +189,15 @@ async fn perform_faucet_test( debug!("Generated account ID: {} (length: {})", account_id, account_id.len()); // Step 1: Request PoW challenge - let pow_url = faucet_url.join("/pow")?; - let response = client - .get(pow_url) - 
.query(&[("account_id", &account_id), ("amount", &MINT_AMOUNT.to_string())]) - .send() - .await?; + let mut pow_url = faucet_url.join("/pow")?; + pow_url + .query_pairs_mut() + .append_pair("account_id", &account_id) + .append_pair("amount", &MINT_AMOUNT.to_string()); - let response_text = response.text().await?; + let response = client.get(pow_url).send().await?; + + let response_text: String = response.text().await?; debug!("Faucet PoW response: {}", response_text); let challenge_response: PowChallengeResponse = serde_json::from_str(&response_text) @@ -215,21 +216,18 @@ async fn perform_faucet_test( debug!("Solved PoW challenge with nonce: {}", nonce); // Step 3: Request tokens with the solution - let tokens_url = faucet_url.join("/get_tokens")?; - - let response = client - .get(tokens_url) - .query(&[ - ("account_id", account_id.as_str()), - ("is_private_note", "false"), - ("asset_amount", &MINT_AMOUNT.to_string()), - ("challenge", &challenge_response.challenge), - ("nonce", &nonce.to_string()), - ]) - .send() - .await?; - - let response_text = response.text().await?; + let mut tokens_url = faucet_url.join("/get_tokens")?; + tokens_url + .query_pairs_mut() + .append_pair("account_id", account_id.as_str()) + .append_pair("is_private_note", "false") + .append_pair("asset_amount", &MINT_AMOUNT.to_string()) + .append_pair("challenge", &challenge_response.challenge) + .append_pair("nonce", &nonce.to_string()); + + let response = client.get(tokens_url).send().await?; + + let response_text: String = response.text().await?; let tokens_response: GetTokensResponse = serde_json::from_str(&response_text) .with_context(|| format!("Failed to parse tokens response: {response_text}"))?; diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index 791315d3b8..b103a60c42 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -8,6 +8,7 @@ use std::time::Duration; use anyhow::Context; use 
miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverClient}; use miden_node_proto::generated as proto; +use miden_protocol::account::auth::AuthScheme; use miden_protocol::asset::{Asset, FungibleAsset}; use miden_protocol::note::NoteType; use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; @@ -277,7 +278,7 @@ pub async fn generate_mock_transaction() -> anyhow::Result { // Create an account with basic authentication let account = mock_chain_builder - .add_existing_wallet(Auth::BasicAuth) + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) .context("Failed to add wallet to mock chain")?; // Create a fungible asset diff --git a/bin/node/.env b/bin/node/.env index fc4c2793e3..51a04794f9 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -10,8 +10,10 @@ MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= -MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= +MIDEN_NODE_VALIDATOR_KEY= +MIDEN_NODE_VALIDATOR_KMS_KEY_ID= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE= +MIDEN_NODE_NTX_DATA_DIRECTORY= diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index b6ade3b4da..700ce37060 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -33,6 +33,8 @@ miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] -figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 832b0bb8d2..bf41b46d38 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,39 +1,47 @@ -FROM rust:1.90-slim-bullseye AS 
builder - +FROM rust:1.91-slim-bookworm AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y \ + llvm \ + clang \ + libclang-dev \ + cmake \ + pkg-config \ + libssl-dev \ + libsqlite3-dev \ + ca-certificates && \ rm -rf /var/lib/apt/lists/* - +RUN cargo install cargo-chef WORKDIR /app -COPY ./Cargo.toml . -COPY ./Cargo.lock . -COPY ./bin ./bin -COPY ./crates ./crates -COPY ./proto ./proto -RUN cargo install --path bin/node --locked +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json -FROM debian:bullseye-slim +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --locked --bin miden-node -# Update machine & install required packages -# The installation of sqlite3 is needed for correct function of the SQLite database +# Base line runtime image with runtime dependencies installed. 
+FROM debian:bullseye-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y --no-install-recommends \ - sqlite3 \ + apt-get install -y --no-install-recommends sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node - +FROM runtime-base AS runtime +COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ - org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ - org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ + org.opencontainers.image.documentation=https://github.com/0xMiden/node \ + org.opencontainers.image.source=https://github.com/0xMiden/node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT - ARG CREATED ARG VERSION ARG COMMIT @@ -43,6 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 - # Miden node does not spawn sub-processes, so it can be used as the PID1 CMD miden-node diff --git a/bin/node/build.rs b/bin/node/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/bin/node/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 5cfbc78fcc..5d416ea8e5 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -86,7 +86,6 @@ impl BlockProducerCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, @@ -125,7 +124,6 @@ mod tests { validator_url: dummy_url(), block_producer: 
BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, @@ -149,7 +147,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 22f1199a3f..9ca1872074 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -4,12 +4,10 @@ use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; -use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; -use miden_node_validator::Validator; -use miden_protocol::block::BlockSigner; +use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; @@ -19,12 +17,13 @@ use url::Url; use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, + BundledValidatorConfig, DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, - INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, + ValidatorKey, duration_to_human_readable_string, }; @@ -47,16 +46,9 @@ pub enum BundledCommand { /// Constructs the genesis block from the given toml file. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "FILE")] genesis_config_file: Option, - /// Insecure, hex-encoded validator secret key for development and testing purposes. - /// - /// If not provided, a predefined key is used. 
- #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_insecure_secret_key: String, + /// Configuration for the Validator key used to sign genesis block. + #[command(flatten)] + validator_key: ValidatorKey, }, /// Runs all three node components in the same process. @@ -68,6 +60,10 @@ pub enum BundledCommand { #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")] rpc_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which the Store component should store the database and raw block data. #[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -78,6 +74,9 @@ pub enum BundledCommand { #[command(flatten)] ntx_builder: NtxBuilderConfig, + #[command(flatten)] + validator: BundledValidatorConfig, + /// Enables the exporting of traces for OpenTelemetry. /// /// This can be further configured using environment variables as defined in the official @@ -95,15 +94,6 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, - - /// Insecure, hex-encoded validator secret key for development and testing purposes. - #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_insecure_secret_key: String, }, } @@ -114,14 +104,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. 
crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } .handle() .await @@ -129,36 +119,37 @@ impl BundledCommand { }, BundledCommand::Start { rpc_url, + block_prover_url, data_directory, block_producer, ntx_builder, + validator, enable_otel: _, grpc_timeout, - validator_insecure_secret_key, } => { - let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; - let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; Self::start( rpc_url, + block_prover_url, data_directory, - ntx_builder, block_producer, + ntx_builder, + validator, grpc_timeout, - signer, ) .await }, } } - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] async fn start( rpc_url: Url, + block_prover_url: Option, data_directory: PathBuf, - ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, + ntx_builder: NtxBuilderConfig, + validator: BundledValidatorConfig, grpc_timeout: Duration, - signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. @@ -170,17 +161,19 @@ impl BundledCommand { .await .context("Failed to bind to RPC gRPC endpoint")?; - let block_producer_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to block-producer gRPC endpoint")? - .local_addr() - .context("Failed to retrieve the block-producer's gRPC address")?; + let (block_producer_url, block_producer_address) = { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to block-producer gRPC endpoint")? 
+ .local_addr() + .context("Failed to retrieve the block-producer's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Block Producer URL")?; + (url, socket_addr) + }; - let validator_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to validator gRPC endpoint")? - .local_addr() - .context("Failed to retrieve the validator's gRPC address")?; + // Validator URL is either specified remote, or generated local. + let (validator_url, validator_socket_address) = validator.to_addresses().await?; // Store addresses for each exposed API let store_rpc_listener = TcpListener::bind("127.0.0.1:0") @@ -212,6 +205,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, + block_prover_url, grpc_timeout, } .serve() @@ -223,105 +217,113 @@ impl BundledCommand { let should_start_ntx_builder = !ntx_builder.disabled; // Start block-producer. The block-producer's endpoint is available after loading completes. 
- let block_producer_id = join_set - .spawn({ - let store_url = Url::parse(&format!("http://{store_block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - async move { - BlockProducer { - block_producer_address, - store_url, - validator_url, - batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, - batch_interval: block_producer.batch_interval, - block_interval: block_producer.block_interval, - max_batches_per_block: block_producer.max_batches_per_block, - max_txs_per_batch: block_producer.max_txs_per_batch, - grpc_timeout, - mempool_tx_capacity: block_producer.mempool_tx_capacity, + let block_producer_id = { + let validator_url = validator_url.clone(); + join_set + .spawn({ + let store_url = Url::parse(&format!("http://{store_block_producer_address}")) + .context("Failed to parse URL")?; + async move { + BlockProducer { + block_producer_address, + store_url, + validator_url, + batch_prover_url: block_producer.batch_prover_url, + batch_interval: block_producer.batch_interval, + block_interval: block_producer.block_interval, + max_batches_per_block: block_producer.max_batches_per_block, + max_txs_per_batch: block_producer.max_txs_per_batch, + grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, + } + .serve() + .await + .context("failed while serving block-producer component") } - .serve() - .await - .context("failed while serving block-producer component") - } - }) - .id(); + }) + .id() + }; - let validator_id = join_set - .spawn({ - async move { - Validator { - address: validator_address, + // Start RPC component. 
+ let rpc_id = { + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); + join_set + .spawn(async move { + let store_url = Url::parse(&format!("http://{store_rpc_address}")) + .context("Failed to parse URL")?; + Rpc { + listener: grpc_rpc, + store_url, + block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, - signer, } .serve() .await - .context("failed while serving validator component") - } - }) - .id(); - - // Start RPC component. - let rpc_id = join_set - .spawn(async move { - let store_url = Url::parse(&format!("http://{store_rpc_address}")) - .context("Failed to parse URL")?; - let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - Rpc { - listener: grpc_rpc, - store_url, - block_producer_url: Some(block_producer_url), - validator_url, - grpc_timeout, - } - .serve() - .await - .context("failed while serving RPC component") - }) - .id(); + .context("failed while serving RPC component") + }) + .id() + }; // Lookup table so we can identify the failed component. let mut component_ids = HashMap::from([ (store_id, "store"), (block_producer_id, "block-producer"), - (validator_id, "validator"), (rpc_id, "rpc"), ]); // Start network transaction builder. The endpoint is available after loading completes. 
- let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) - .context("Failed to parse URL")?; - if should_start_ntx_builder { - let validator_url = Url::parse(&format!("http://{validator_address}")) + let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); + + let builder_config = ntx_builder.into_builder_config( + store_ntx_builder_url, + block_producer_url, + validator_url, + &data_directory, + ); + let id = join_set .spawn(async move { - let block_producer_url = - Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - NetworkTransactionBuilder::new( - store_ntx_builder_url, - block_producer_url, - validator_url, - ntx_builder.tx_prover_url, - ntx_builder.script_cache_size, - ) - .run() - .await - .context("failed while serving ntx builder component") + builder_config + .build() + .await + .context("failed to initialize ntx builder")? + .run() + .await + .context("failed while serving ntx builder component") }) .id(); component_ids.insert(id, "ntx-builder"); } + // Start the Validator if we have bound a socket. + if let Some(address) = validator_socket_address { + let secret_key_bytes = hex::decode(validator.validator_key)?; + let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + let signer = ValidatorSigner::new_local(signer); + let id = join_set + .spawn({ + async move { + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") + } + }) + .id(); + component_ids.insert(id, "validator"); + } + // SAFETY: The joinset is definitely not empty. 
let component_result = join_set.join_next_with_id().await.unwrap(); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 7e8fa7e69f..ec476bd49a 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,12 +1,19 @@ +use std::net::SocketAddr; use std::num::NonZeroUsize; +use std::path::{Path, PathBuf}; use std::time::Duration; +use anyhow::Context; use miden_node_block_producer::{ DEFAULT_BATCH_INTERVAL, DEFAULT_BLOCK_INTERVAL, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH, }; +use miden_node_validator::ValidatorSigner; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; +use tokio::net::TcpListener; use url::Url; pub mod block_producer; @@ -36,7 +43,9 @@ const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; -const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; +const ENV_VALIDATOR_KEY: &str = "MIDEN_NODE_VALIDATOR_KEY"; +const ENV_VALIDATOR_KMS_KEY_ID: &str = "MIDEN_NODE_VALIDATOR_KMS_KEY_ID"; +const ENV_NTX_DATA_DIRECTORY: &str = "MIDEN_NODE_NTX_DATA_DIRECTORY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -47,7 +56,95 @@ fn duration_to_human_readable_string(duration: Duration) -> String { humantime::format_duration(duration).to_string() } -/// Configuration for the Network Transaction Builder component +/// Configuration for the Validator key used to sign blocks. +/// +/// Used by the Validator command and the genesis bootstrap command. 
+#[derive(clap::Args)] +#[group(required = false, multiple = false)] +pub struct ValidatorKey { + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + /// + /// Cannot be used with `validator.key.kms-id`. + #[arg( + long = "validator.key.hex", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX, + )] + validator_key: String, + /// Key ID for the KMS key used by validator to sign blocks. + /// + /// Cannot be used with `validator.key.hex`. + #[arg( + long = "validator.key.kms-id", + env = ENV_VALIDATOR_KMS_KEY_ID, + value_name = "VALIDATOR_KMS_KEY_ID", + )] + validator_kms_key_id: Option, +} + +impl ValidatorKey { + /// Consumes the validator key configuration and returns a KMS or local key signer depending on + /// the supplied configuration. + pub async fn into_signer(self) -> anyhow::Result { + if let Some(kms_key_id) = self.validator_kms_key_id { + // Use KMS key ID to create a ValidatorSigner. + let signer = ValidatorSigner::new_kms(kms_key_id).await?; + Ok(signer) + } else { + // Use hex-encoded key to create a ValidatorSigner. + let signer = SecretKey::read_from_bytes(hex::decode(self.validator_key)?.as_ref())?; + let signer = ValidatorSigner::new_local(signer); + Ok(signer) + } + } +} + +/// Configuration for the Validator component when run in the bundled mode. +#[derive(clap::Args)] +pub struct BundledValidatorConfig { + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// Only used when the Validator URL argument is not set. + #[arg( + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_key: String, + + /// The remote Validator's gRPC URL. If unset, will default to running a Validator + /// in-process. If set, the insecure key argument is ignored. 
+ #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Option, +} + +impl BundledValidatorConfig { + /// Converts the [`BundledValidatorConfig`] into a URL and an optional [`SocketAddr`]. + /// + /// If the `validator_url` is set, it returns the URL and `None` for the [`SocketAddr`]. + /// + /// If `validator_url` is not set, it binds to a random port on localhost, creates a URL, + /// and returns the URL and the bound [`SocketAddr`]. + async fn to_addresses(&self) -> anyhow::Result<(Url, Option)> { + if let Some(url) = &self.validator_url { + Ok((url.clone(), None)) + } else { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to validator gRPC endpoint")? + .local_addr() + .context("Failed to retrieve the validator's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Validator URL")?; + Ok((url, Some(socket_addr))) + } + } +} + +/// Configuration for the Network Transaction Builder component. #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. @@ -68,6 +165,9 @@ pub struct NtxBuilderConfig { )] pub ticker_interval: Duration, + /// Number of note scripts to cache locally. + /// + /// Note scripts not in cache must first be retrieved from the store. #[arg( long = "ntx-builder.script-cache-size", env = ENV_NTX_SCRIPT_CACHE_SIZE, @@ -75,6 +175,38 @@ pub struct NtxBuilderConfig { default_value_t = DEFAULT_NTX_SCRIPT_CACHE_SIZE )] pub script_cache_size: NonZeroUsize, + + /// Directory for the ntx-builder's persistent database. + /// + /// If not set, defaults to the node's data directory. + #[arg(long = "ntx-builder.data-directory", env = ENV_NTX_DATA_DIRECTORY, value_name = "DIR")] + pub ntx_data_directory: Option, +} + +impl NtxBuilderConfig { + /// Converts this CLI config into the ntx-builder's internal config. 
+ /// + /// The `node_data_directory` is used as the default location for the ntx-builder's database + /// if `--ntx-builder.data-directory` is not explicitly set. + pub fn into_builder_config( + self, + store_url: Url, + block_producer_url: Url, + validator_url: Url, + node_data_directory: &Path, + ) -> miden_node_ntx_builder::NtxBuilderConfig { + let data_dir = self.ntx_data_directory.unwrap_or_else(|| node_data_directory.to_path_buf()); + let database_filepath = data_dir.join("ntx-builder.sqlite3"); + + miden_node_ntx_builder::NtxBuilderConfig::new( + store_url, + block_producer_url, + validator_url, + database_filepath, + ) + .with_tx_prover_url(self.tx_prover_url) + .with_script_cache_size(self.script_cache_size) + } } /// Configuration for the Block Producer component @@ -103,11 +235,6 @@ pub struct BlockProducerConfig { #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")] pub batch_prover_url: Option<Url>, - /// The remote block prover's gRPC url. If unset, will default to running a prover - /// in-process which is expensive. - #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] - pub block_prover_url: Option<Url>, - /// The number of transactions per batch.
#[arg( long = "max-txs-per-batch", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 9dd311368f..14b266147e 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -5,8 +5,8 @@ use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::grpc::UrlExt; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; -use miden_protocol::utils::Deserializable; +use miden_node_utils::signer::BlockSigner; +use miden_node_validator::ValidatorSigner; use url::Url; use super::{ @@ -17,14 +17,14 @@ use super::{ }; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, - INSECURE_VALIDATOR_KEY_HEX, + ValidatorKey, duration_to_human_readable_string, }; -#[allow(clippy::large_enum_variant, reason = "single use enum")] +#[expect(clippy::large_enum_variant, reason = "single use enum")] #[derive(clap::Subcommand)] pub enum StoreCommand { /// Bootstraps the blockchain database with the genesis block. @@ -43,16 +43,9 @@ pub enum StoreCommand { /// Use the given configuration file to construct the genesis state from. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "GENESIS_CONFIG")] genesis_config_file: Option<PathBuf>, - /// Insecure, hex-encoded validator secret key for development and testing purposes. - /// - /// If not provided, a predefined key is used. - #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_insecure_secret_key: String, + /// Configuration for the Validator key used to sign genesis block. + #[command(flatten)] + validator_key: ValidatorKey, }, /// Starts the store component.
@@ -72,6 +65,10 @@ pub enum StoreCommand { #[arg(long = "block-producer.url", env = ENV_STORE_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option<Url>, + /// Directory in which to store the database and raw block data. #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -104,17 +101,21 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, - } => Self::bootstrap( - &data_directory, - &accounts_directory, - genesis_config_file.as_ref(), - validator_insecure_secret_key, - ), + validator_key, + } => { + Self::bootstrap( + &data_directory, + &accounts_directory, + genesis_config_file.as_ref(), + validator_key, + ) + .await + }, StoreCommand::Start { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, enable_otel: _, grpc_timeout, @@ -123,6 +124,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, grpc_timeout, ) @@ -143,6 +145,7 @@ impl StoreCommand { rpc_url: Url, ntx_builder_url: Url, block_producer_url: Url, + block_prover_url: Option<Url>, data_directory: PathBuf, grpc_timeout: Duration, ) -> anyhow::Result<()> { @@ -169,6 +172,7 @@ impl StoreCommand { Store { rpc_listener, + block_prover_url, ntx_builder_listener, block_producer_listener, data_directory, @@ -179,28 +183,22 @@ impl StoreCommand { .context("failed while serving store component") } - fn bootstrap( + async fn bootstrap( data_directory: &Path, accounts_directory: &Path, genesis_config: Option<&PathBuf>, - validator_insecure_secret_key: String, + validator_key: ValidatorKey, ) -> anyhow::Result<()> { - // Decode the validator key.
- let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; - // Parse genesis config (or default if not given). let config = genesis_config .map(|file_path| { - let toml_str = fs_err::read_to_string(file_path)?; - GenesisConfig::read_toml(toml_str.as_str()).with_context(|| { + GenesisConfig::read_toml_file(file_path).with_context(|| { format!("failed to parse genesis config from file {}", file_path.display()) }) }) .transpose()? .unwrap_or_default(); - let (genesis_state, secrets) = config.into_state(signer)?; - // Create directories if they do not already exist. for directory in &[accounts_directory, data_directory] { if fs_err::exists(directory)? { @@ -223,7 +221,41 @@ impl StoreCommand { } } - // Write the accounts to disk + // Bootstrap with KMS key or local key. + let signer = validator_key.into_signer().await?; + match signer { + ValidatorSigner::Kms(signer) => { + Self::bootstrap_accounts_and_store( + config, + signer, + accounts_directory, + data_directory, + ) + .await + }, + ValidatorSigner::Local(signer) => { + Self::bootstrap_accounts_and_store( + config, + signer, + accounts_directory, + data_directory, + ) + .await + }, + } + } + + /// Builds the genesis state of the chain, writes accounts to file, and bootstraps the store. + async fn bootstrap_accounts_and_store( + config: GenesisConfig, + signer: impl BlockSigner, + accounts_directory: &Path, + data_directory: &Path, + ) -> anyhow::Result<()> { + // Build genesis state with the provided signer. + let (genesis_state, secrets) = config.into_state(signer)?; + + // Write accounts to file. for item in secrets.as_account_files(&genesis_state) { let AccountFileWithName { account_file, name } = item?; let accountpath = accounts_directory.join(name); @@ -236,6 +268,7 @@ impl StoreCommand { account_file.write(accountpath)?; } - Store::bootstrap(genesis_state, data_directory) + // Bootstrap store. 
+ Store::bootstrap(genesis_state, data_directory).await } } diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index f543be3013..ef3d9363a9 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -1,16 +1,20 @@ +use std::net::SocketAddr; +use std::path::PathBuf; use std::time::Duration; use anyhow::Context; use miden_node_utils::grpc::UrlExt; -use miden_node_validator::Validator; +use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_DATA_DIRECTORY, ENV_ENABLE_OTEL, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, + ENV_VALIDATOR_KMS_KEY_ID, ENV_VALIDATOR_URL, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, @@ -40,29 +44,79 @@ pub enum ValidatorCommand { )] grpc_timeout: Duration, + /// Directory in which to store the validator's data. + #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] + data_directory: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. /// /// If not provided, a predefined key is used. - #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] - insecure_secret_key: String, + /// + /// Cannot be used with `key.kms-id`. + #[arg( + long = "key.hex", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX, + group = "key" + )] + validator_key: String, + + /// Key ID for the KMS key used by validator to sign blocks. + /// + /// Cannot be used with `key.hex`. + #[arg( + long = "key.kms-id", + env = ENV_VALIDATOR_KMS_KEY_ID, + value_name = "VALIDATOR_KMS_KEY_ID", + group = "key" + )] + kms_key_id: Option<String>, }, } impl ValidatorCommand { + /// Runs the validator command.
pub async fn handle(self) -> anyhow::Result<()> { let Self::Start { - url, grpc_timeout, insecure_secret_key, .. + url, + grpc_timeout, + validator_key, + data_directory, + kms_key_id, + .. } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + // Run validator with KMS key backend if key id provided. + if let Some(kms_key_id) = kms_key_id { + let signer = ValidatorSigner::new_kms(kms_key_id).await?; + Self::serve(address, grpc_timeout, signer, data_directory).await + } else { + let signer = SecretKey::read_from_bytes(hex::decode(validator_key)?.as_ref())?; + let signer = ValidatorSigner::new_local(signer); + Self::serve(address, grpc_timeout, signer, data_directory).await + } + } - Validator { address, grpc_timeout, signer } - .serve() - .await - .context("failed while serving validator component") + /// Runs the validator component until failure. + async fn serve( + address: SocketAddr, + grpc_timeout: Duration, + signer: ValidatorSigner, + data_directory: PathBuf, + ) -> anyhow::Result<()> { + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") } pub fn is_open_telemetry_enabled(&self) -> bool { diff --git a/bin/remote-prover/.env b/bin/remote-prover/.env index 05593e6989..b7191203d7 100644 --- a/bin/remote-prover/.env +++ b/bin/remote-prover/.env @@ -1,32 +1,6 @@ -# For more info use -h on the relevant commands: -# miden-remote-prover start-worker -h -# miden-remote-prover start-proxy -h +# For more info consult the help output: `miden-remote-prover --help` -# Proxy ############################ -# Port of the proxy -MRP_PORT=8082 -# Port to add / remove workers -MRP_CONTROL_PORT=8083 -# Uncomment the following line to enable Prometheus metrics on port 6192 -# MRP_METRICS_PORT=6192 -MRP_TIMEOUT=100s -MRP_CONNECTION_TIMEOUT=10s 
-MRP_MAX_QUEUE_ITEMS=10 -MRP_MAX_RETRIES_PER_REQUEST=1 -MRP_MAX_REQ_PER_SEC=5 -MRP_AVAILABLE_WORKERS_POLLING_INTERVAL=20ms -MRP_HEALTH_CHECK_INTERVAL=1s -MRP_ENABLE_METRICS=false -MRP_PROOF_TYPE=transaction -MRP_PROXY_WORKERS_LIST=127.0.0.1:50051 -MRP_GRACE_PERIOD=20s -MRP_GRACEFUL_SHUTDOWN_TIMEOUT=5s -RUST_LOG=info -#################################### - -# Worker ########################### -# Use 127.0.0.1 instead of 0.0.0.0 -MRP_WORKER_LOCALHOST=false -MRP_WORKER_PORT=50051 -MRP_WORKER_PROOF_TYPE=transaction -#################################### +MIDEN_PROVER_PORT=8082 +MIDEN_PROVER_KIND=transaction +MIDEN_PROVER_TIMEOUT=100s +MIDEN_PROVER_CAPACITY=10 diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 85bc355f79..28201d5b2e 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -1,6 +1,6 @@ [package] authors.workspace = true -description = "Miden blockchain remote prover" +description = "Miden remote prover" edition.workspace = true homepage.workspace = true keywords = ["miden", "prover", "remote"] @@ -11,61 +11,53 @@ repository.workspace = true rust-version.workspace = true version.workspace = true -[[bin]] -name = "miden-remote-prover" -path = "src/main.rs" - -[features] -concurrent = ["miden-tx/concurrent"] -default = ["concurrent"] - [lints] workspace = true [dependencies] -anyhow = { workspace = true } -async-trait = { version = "0.1" } -axum = { version = "0.8" } -bytes = { version = "1.0" } -clap = { features = ["env"], workspace = true } -http = { workspace = true } -humantime = { workspace = true } -miden-block-prover = { workspace = true } -miden-node-proto = { workspace = true } -miden-node-utils = { workspace = true } -miden-protocol = { features = ["std"], workspace = true } -miden-tx = { features = ["std"], workspace = true } -miden-tx-batch-prover = { features = ["std"], workspace = true } -opentelemetry = { version = "0.31" } -pingora = { features = ["lb"], version = "0.6" } -pingora-core = { 
version = "0.6" } -pingora-limits = { version = "0.6" } -pingora-proxy = { version = "0.6" } -prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], workspace = true } -reqwest = { version = "0.12" } -semver = { version = "1.0" } -serde = { features = ["derive"], version = "1.0" } -serde_qs = { version = "0.15" } -thiserror = { workspace = true } -tokio = { features = ["full"], workspace = true } -tokio-stream = { features = ["net"], version = "0.1" } -tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } -tonic-health = { version = "0.14" } -tonic-prost = { workspace = true } -tonic-web = { version = "0.14" } -tower-http = { features = ["trace"], workspace = true } -tracing = { workspace = true } -tracing-opentelemetry = { version = "0.32" } -uuid = { features = ["v4"], version = "1.16" } +anyhow = { workspace = true } +async-trait = { version = "0.1" } +clap = { features = ["env"], workspace = true } +http = { workspace = true } +humantime = { workspace = true } +miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } +miden-tx = { features = ["concurrent", "std"], workspace = true } +miden-tx-batch-prover = { features = ["std"], workspace = true } +opentelemetry = { version = "0.31" } +prost = { default-features = false, features = ["derive"], workspace = true } +tokio = { features = ["full"], workspace = true } +tokio-stream = { features = ["net"], version = "0.1" } +tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } +tonic-health = { version = "0.14" } +tonic-prost = { workspace = true } +tonic-reflection = { workspace = true } +tonic-web = { version = "0.14" } +tower-http = { features = ["trace"], workspace = true } +tracing = { 
workspace = true } [dev-dependencies] +assert_matches = { workspace = true } miden-protocol = { features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } +serial_test = { version = "3" } [build-dependencies] -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { features = ["fancy"], version = "7.5" } -tonic-prost-build = { workspace = true } +build-rs = { workspace = true } +fs-err = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { features = ["fancy"], version = "7.5" } +tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "http", + "prost", + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/bin/remote-prover/README.md b/bin/remote-prover/README.md index 476e2293fe..24a6f9f24f 100644 --- a/bin/remote-prover/README.md +++ b/bin/remote-prover/README.md @@ -1,64 +1,67 @@ # Miden remote prover -A service for generating Miden proofs on-demand. The binary enables spawning workers and a proxy for Miden's remote prover. It currently supports proving individual transactions, transaction batches, and blocks. +A gRPC server which provides a service for proving either transactions, batches or blocks for the Miden blockchain. -A worker is a gRPC service that can receive transaction witnesses, proposed batches, or proposed blocks, prove them, and return the generated proofs. It can handle only one request at a time and will return an error if it is already in use. Each worker is specialized on startup to handle exactly one type of proof requests - transactions, batches, or blocks. +This enables weaker devices to offload the proof generation to a beefy remote server running this service. 
-The proxy uses [Cloudflare's Pingora crate](https://crates.io/crates/pingora), which provides features to create a modular proxy. It is meant to handle multiple workers with a queue, assigning a worker to each request and retrying if the worker is not available. Further information about Pingora and its features can be found in the [official GitHub repository](https://github.com/cloudflare/pingora). +The implementation provides a configurable request queue and proves one request at a time in FIFO order. This is not intended to cover +complex proxy setups nor load-balancing, but can instead be used as a starting point for more advanced setups. -## Debian Installation +The gRPC specification can be found in the [Miden repository](https://github.com/0xMiden/node/blob/main/proto/proto/remote_prover.proto). +Ensure you are viewing the appropriate version tag or commit. -#### Prover - -Install the Debian package: +## Quick start ```bash -set -e +# Install the binary. +cargo install miden-remote-prover --locked -sudo wget https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-v0.8-arm64.deb -O prover.deb -sudo wget -q -O - https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover.checksum -sudo sha256sum prover.deb | awk '{print $1}' > prover.sha256 -sudo diff prover.sha256 prover.checksum -sudo dpkg -i prover.deb -sudo rm prover.deb +# and start as a transaction prover. +miden-remote-prover \ + --kind transaction \ # Specify the kind of proof to generate (transaction, batch, or block) + --port 50051 ``` -Edit the configuration file `/lib/systemd/system/miden-prover.service.env` +In a separate terminal, inspect the available services using grpcurl and reflection. -Run the service: +```bash +grpcurl -plaintext localhost:50051 list +``` + +or query the status of the prover. 
```bash -sudo systemctl daemon-reload -sudo systemctl enable miden-prover -sudo systemctl start miden-prover +grpcurl -plaintext localhost:50051 remote_prover.WorkerStatusApi/Status ``` -#### Prover Proxy +## Installation + +### Debian package + +Install the Debian package: ```bash set -e -sudo wget https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb -O prover-proxy.deb -sudo wget -q -O - https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover-proxy.checksum -sudo sha256sum prover-proxy.deb | awk '{print $1}' > prover-proxy.sha256 -sudo diff prover-proxy.sha256 prover-proxy.checksum -sudo dpkg -i prover-proxy.deb -sudo rm prover-proxy.deb +sudo wget https://github.com/0xMiden/node/releases/download/v0.8/miden-prover-v0.8-arm64.deb -O prover.deb +sudo wget -q -O - https://github.com/0xMiden/node/releases/download/v0.8/miden-prover-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover.checksum +sudo sha256sum prover.deb | awk '{print $1}' > prover.sha256 +sudo diff prover.sha256 prover.checksum +sudo dpkg -i prover.deb +sudo rm prover.deb ``` -Edit the configuration file `/lib/systemd/system/miden-prover-proxy.service.env` - -Edit the service file to specify workers `/lib/systemd/system/miden-prover-proxy.service` +Edit the configuration file `/lib/systemd/system/miden-prover.service.env` Run the service: ```bash sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy +sudo systemctl enable miden-prover +sudo systemctl start miden-prover ``` -## Source Installation +### From source To build the service from a local version, from the root of the workspace you can run: @@ -68,274 +71,73 @@ make install-remote-prover The CLI can be installed from the source code using specific git revisions with `cargo install` or from crates.io with `cargo install miden-remote-prover`. 
-## Worker +## Configuration -To start the worker service you will need to run: +Quick start: ```bash -miden-remote-prover start-worker --port 8082 --prover-type transaction +miden-remote-prover --kind transaction ``` -This will spawn a worker using the port defined in the command option. The host will be 0.0.0.0 by default, or 127.0.0.1 if the --localhost flag is used. In case that the port is not provided, it will default to `50051`. This command will start a worker that can handle transaction and batch proving requests. - -The `--prover-type` flag is required and specifies which type of proof the worker will handle. The available options are: - -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs - -Each worker can only handle one type of proof. If you need to handle multiple proof types, you should start multiple workers, each with a different proof type. Additionally, you can use the `--localhost` flag to bind to 127.0.0.1 instead of 0.0.0.0. - -### Worker Configuration - -The worker can be configured using the following environment variables: - -| Variable | Description | Default | -|---------------------------|---------------------------------|---------------| -| `MRP_WORKER_LOCALHOST` | Use localhost (127.0.0.1) | `false` | -| `MRP_WORKER_PORT` | The port number for the worker | `50051` | -| `MRP_WORKER_PROOF_TYPE` | The supported prover type | `transaction` | - -For example: - -```bash -export MRP_WORKER_LOCALHOST="true" -export MRP_WORKER_PORT="8082" -export MRP_WORKER_PROOF_TYPE="block" -miden-remote-prover start-worker -``` - -## Proxy - -To start the proxy service, you will need to run: - -```bash -miden-remote-prover start-proxy --prover-type transaction --workers [worker1],[worker2],...,[workerN] -``` - -For example: - -```bash -miden-remote-prover start-proxy --prover-type transaction --workers 0.0.0.0:8084,0.0.0.0:8085 -``` - -This command will start the proxy using the workers passed as arguments. 
The workers should be in the format `host:port`. Another way to specify the workers is by using the `MRP_PROXY_WORKERS_LIST` environment variable, which can be set to a comma-separated list of worker addresses. For example: +The prover can be further configured from the command line or using environment variables as per the help message: ```bash -export MRP_PROXY_WORKERS_LIST="0.0.0.0:8084,0.0.0.0:8085" -``` - -If no workers are passed, the proxy will start without any workers and will not be able to handle any requests until one is added through the `miden-remote-prover add-worker` command. +> miden-remote-prover --help -The `--prover-type` flag is required and specifies which type of proof the proxy will handle. The available options are: +Usage: miden-remote-prover [OPTIONS] --kind -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs +Options: + --port + The port the gRPC server will be hosted on -The proxy can only handle one type of proof at a time. When you add workers to the proxy, it will check their supported proof type. Workers that support a different proof type than the proxy will be marked as unhealthy and will not be used for proving requests. + [env: MIDEN_PROVER_PORT=] + [default: 50051] -For example, if you start a proxy with `--prover-type transaction` and add these workers: + --kind + The proof type that the prover will be handling -- Worker 1: Transaction proofs (Healthy) -- Worker 2: Batch proofs (Unhealthy - incompatible proof type) -- Worker 3: Block proofs (Unhealthy - incompatible proof type) + [env: MIDEN_PROVER_KIND=] + [possible values: transaction, batch, block] -Only Worker 1 will be used for proving requests, while Workers 2 and 3 will be marked as unhealthy due to incompatible proof types. + --timeout + Maximum time allowed for a proof request to complete. Once exceeded, the request is aborted -You can customize the proxy service by setting environment variables. 
Possible customizations can be found by running `miden-remote-prover start-proxy --help`. + [env: MIDEN_PROVER_TIMEOUT=] + [default: 60s] -An example `.env` file is provided in the crate's root directory. To use the variables from a file in any Unix-like operating system, you can run `source `. + --capacity + Maximum number of concurrent proof requests that the prover will allow. -At the moment, when a worker added to the proxy stops working and can not connect to it for a request, the connection is marked as retriable meaning that the proxy will try reaching another worker. The number of retries is configurable via the `MRP_MAX_RETRIES_PER_REQUEST` environmental variable. + Note that the prover only proves one request at a time; the rest are queued. + This capacity is used to limit the number of requests that can be queued at any given time, + and includes the one request that is currently being processed. -## Updating workers on a running proxy + [env: MIDEN_PROVER_CAPACITY=] + [default: 1] -To update the workers on a running proxy, two commands are provided: `add-workers` and `remove-workers`. These commands will update the workers on the proxy and will not require a restart. 
To use these commands, you will need to run: - -```bash -miden-remote-prover add-workers --control-port [worker1],[worker2],...,[workerN] -miden-remote-prover remove-workers --control-port [worker1],[worker2],...,[workerN] + -h, --help + Print help (see a summary with '-h') ``` -For example: +## Status, health and monitoring -```bash -# To add 0.0.0.0:8085 and 200.58.70.4:50051 to the workers list: -miden-remote-prover add-workers --control-port 8083 0.0.0.0:8085,200.58.70.4:50051 -# To remove 158.12.12.3:8080 and 122.122.6.6:50051 from the workers list: -miden-remote-prover remove-workers --control-port 8083 158.12.12.3:8080,122.122.6.6:50051 -``` +The server implements the following health and status related gRPC services: -These commands can receive the list of workers to update as a comma-separated list of addresses through the `MRP_PROXY_WORKERS_LIST` environment variable, or as command-line arguments: +- [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) +- [gRPC Reflection](https://grpc.io/docs/guides/reflection/) +- [WorkerStatusApi](https://github.com/0xMiden/node/blob/main/proto/proto/remote_prover.proto) -```bash -export MRP_PROXY_WORKERS_LIST="0.0.0.0:8085,200.58.70.4:50051" -miden-remote-prover add-workers --control-port 8083 -miden-remote-prover remove-workers --control-port 8083 -``` +The server supports OpenTelemetry traces which can be configured using the environment variables specified in the OpenTelemetry documentation. -The `--control-port` flag is required to specify the port where the proxy is listening for updates. The workers are passed as arguments in the format `host:port`. The port can be specified via the `MRP_CONTROL_PORT` environment variable. 
For example: +For example, to send the traces to [HoneyComb](https://www.honeycomb.io/): ```bash -export MRP_CONTROL_PORT="8083" -miden-remote-prover add-workers 0.0.0.0:8085 +OTEL_SERVICE_NAME=miden-remote-prover +OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io +OTEL_EXPORTER_OTLP_HEADERS=x-honeycomb-team= ``` -Note that, in order to update the workers, the proxy must be running in the same computer as the command is being executed because it will check if the client address is localhost to avoid any security issues. - -### Health check - -The worker service implements the [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) standard, and includes the methods described in this [official proto file](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - -The proxy service uses this health check to determine if a worker is available to receive requests. If a worker is not available, it will be removed from the set of workers that the proxy can use to send requests. - -### Status check - -The worker service implements a custom status check that returns information about the worker's current state and supported proof type. The proxy service uses this status check to determine if a worker is available to receive requests and if it supports the required proof type. If a worker is not available or doesn't support the required proof type, it will be removed from the set of workers that the proxy can use to send requests. - -The status check returns: - -- Whether the worker is ready to process requests -- The type of proofs the worker supports (transaction, batch, or block proofs) -- The version of the worker - -### Proxy Status Endpoint - -The proxy service exposes a gRPC status endpoint that provides information about the current state of the proxy and its workers. This endpoint implements the `ProxyStatusApi` service defined in `proxy_status.proto`. 
- -#### gRPC Service Definition - -The status service provides the following method: - -- `Status(ProxyStatusRequest) -> ProxyStatusResponse`: Returns the current status of the proxy and all its workers - -#### Response Format - -The gRPC response includes the following information: - -- `version`: The version of the proxy -- `supported_proof_type`: The type of proof that the proxy supports (`TRANSACTION`, `BATCH`, or `BLOCK`) -- `workers`: A list of workers with their status information - -Each worker status includes: - -- `address`: The worker's network address -- `version`: The worker's version -- `status`: The worker's health status (`UNKNOWN`, `HEALTHY`, or `UNHEALTHY`) - -#### Example Usage - -You can query the status endpoint using a gRPC client. For example, using `grpcurl`: - -```bash -# Assuming the proxy is running on port 8084 -grpcurl -plaintext -import-path ./proto -proto proxy_status.proto \ - -d '{}' localhost:8084 proxy_status.ProxyStatusApi.Status -``` - -Example response: - -```json -{ - "version": "0.8.0", - "supported_proof_type": "TRANSACTION", - "workers": [ - { - "address": "0.0.0.0:50051", - "version": "0.8.0", - "status": "UNHEALTHY" - }, - { - "address": "0.0.0.0:50052", - "version": "0.8.0", - "status": "HEALTHY" - } - ] -} -``` - -The status endpoint is integrated into the main proxy service and uses the same port as the proxy. The status information is automatically updated during health checks, ensuring it reflects the current state of all workers. - -## Logging and Tracing - -The service uses the [`tracing`](https://docs.rs/tracing/latest/tracing/) crate for both logging and distributed tracing, providing structured, high-performance logs and trace data. - -By default, logs are written to `stdout` and the default logging level is `info`. This can be changed via the `RUST_LOG` environment variable. For example: - -``` -export RUST_LOG=debug -``` - -For tracing, we use OpenTelemetry protocol. 
By default, traces are exported to the endpoint specified by `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. To consume and visualize these traces we can use Jaeger or any other OpenTelemetry compatible consumer. - -The simplest way to install Jaeger is by using a [Docker](https://www.docker.com/) container. To do so, run: - -```bash -docker run -d -p4317:4317 -p16686:16686 jaegertracing/all-in-one:latest -``` - -Then access the Jaeger UI at `http://localhost:16686/`. - -If Docker is not an option, Jaeger can also be set up directly on your machine or hosted in the cloud. See the [Jaeger documentation](https://www.jaegertracing.io/docs/) for alternative installation methods. - -## Metrics - -The proxy includes a service that can optionally expose metrics to be consumed by [Prometheus](https://prometheus.io/docs/introduction/overview/). This service is enabled by specifying a metrics port. - -### Enabling Prometheus Metrics - -To enable Prometheus metrics, simply specify a port on which to expose the metrics. This can be done via environment variables or command-line arguments. - -#### Using Environment Variables - -Set the following environment variable: - -```bash -export MRP_METRICS_PORT=6192 # Set to enable metrics on port 6192 -``` - -To disable metrics, simply don't set the MRP_METRICS_PORT environment variable. - -#### Using Command-Line Arguments - -Specify a metrics port using the `--metrics-port` flag when starting the proxy: - -```bash -miden-remote-prover start-proxy --metrics-port 6192 [worker1] [worker2] ... [workerN] -``` - -If you don't specify a metrics port, metrics will be disabled. - -When enabled, the Prometheus metrics will be available at `http://0.0.0.0:` (e.g., `http://0.0.0.0:6192`). - -The metrics architecture works by having the proxy expose metrics at an endpoint (`/metrics`) in a format Prometheus can read. Prometheus periodically scrapes this endpoint, adds timestamps to the metrics, and stores them in its time-series database. 
Then, we can use tools like Grafana to query Prometheus and visualize these metrics in configurable dashboards. - -The simplest way to install Prometheus and Grafana is by using Docker containers. To do so, run: - -```bash -docker run \ - -d \ - -p 9090:9090 \ - -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ - prom/prometheus - -docker run -d -p 3000:3000 --name grafana grafana/grafana-enterprise:latest -``` - -In case that Docker is not an option, Prometheus and Grafana can also be set up directly on your machine or hosted in the cloud. See the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/getting_started/) and [Grafana documentation](https://grafana.com/docs/grafana/latest/setup-grafana/) for alternative installation methods. - -A prometheus configuration file is provided in this repository, you will need to modify the `scrape_configs` section to include the URL of the proxy service (e.g., `http://0.0.0.0:6192`). - -Then, to add the new Prometheus collector as a datasource for Grafana, you can [follow this tutorial](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/existing-datasource/). A Grafana dashboard under the name `proxy_grafana_dashboard.json` is provided, see this [link](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/) to import it. Otherwise, you can [create your own dashboard](https://grafana.com/docs/grafana/latest/getting-started/build-first-dashboard/) using the metrics provided by the proxy and export it by following this [link](https://grafana.com/docs/grafana/latest/dashboards/share-dashboards-panels/#export-a-dashboard-as-json). - -## Features - -Description of this crate's feature: - -| Features | Description | -| ------------ | ------------------------------------------------------ | -| `concurrent` | Enables concurrent code to speed up runtime execution. | +A self-hosted alternative is [Jaeger](https://www.jaegertracing.io/). 
## License diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index f9b2eaafb3..0d9f0f89d1 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -1,27 +1,27 @@ +use std::path::Path; + +use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; -use miette::IntoDiagnostic; +use miette::{IntoDiagnostic, WrapErr}; use tonic_prost_build::FileDescriptorSet; -/// Defines whether the build script should generate files in `/src`. -/// -/// The docs.rs build pipeline has a read-only filesystem, so we have to avoid writing to `src`, -/// otherwise the docs will fail to build there. Note that writing to `OUT_DIR` is fine. -const BUILD_GENERATED_FILES_IN_SRC: bool = option_env!("BUILD_PROTO").is_some(); - -const GENERATED_OUT_DIR: &str = "src/generated"; - /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - if !BUILD_GENERATED_FILES_IN_SRC { - return Ok(()); - } + miden_node_rocksdb_cxx_linkage_fix::configure(); + + let dst_dir = build_rs::input::out_dir().join("generated"); + + // Remove all existing files. 
+ let _ = fs::remove_dir_all(&dst_dir); + fs::create_dir(&dst_dir) + .into_diagnostic() + .wrap_err("creating destination folder")?; // Get the file descriptor set let remote_prover_descriptor = remote_prover_api_descriptor(); // Build tonic code - build_tonic_from_descriptor(remote_prover_descriptor)?; + build_tonic_from_descriptor(remote_prover_descriptor, &dst_dir)?; Ok(()) } @@ -30,9 +30,12 @@ fn main() -> miette::Result<()> { // ================================================================================================ /// Builds tonic code from a `FileDescriptorSet` -fn build_tonic_from_descriptor(descriptor: FileDescriptorSet) -> miette::Result<()> { +fn build_tonic_from_descriptor( + descriptor: FileDescriptorSet, + dst_dir: &Path, +) -> miette::Result<()> { tonic_prost_build::configure() - .out_dir(GENERATED_OUT_DIR) + .out_dir(dst_dir) .build_server(true) .build_transport(true) .compile_fds_with_config(descriptor, tonic_prost_build::Config::new()) diff --git a/bin/remote-prover/grafana_dashboard.json b/bin/remote-prover/grafana_dashboard.json deleted file mode 100644 index bc391feba7..0000000000 --- a/bin/remote-prover/grafana_dashboard.json +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 1, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 18, - "panels": [], - "title": "Requests", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - 
"axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Total requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Failed requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "red", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Accepted requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "green", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 0, - "y": 1 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Total requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": 
"sum(rate(request_count[1m])) - sum(rate(rate_limited_requests[1m])) - sum(rate(queue_drop_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Accepted requests", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_failure_count[1m]))", - "legendFormat": "Failed requests", - "range": true, - "refId": "A" - } - ], - "title": "Requests", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Rate limited requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "orange", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Queue overflow requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "purple", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 9, - "y": 1 - }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": 
"bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(rate_limited_requests[1m])", - "hide": false, - "instant": false, - "legendFormat": "Rate limited requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(queue_drop_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Queue overflow requests", - "range": true, - "refId": "C" - } - ], - "title": "Rejected requests", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-YlRd" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 5, - "x": 18, - "y": 1 - }, - "id": 17, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": 
"11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_retries[1m])", - "legendFormat": "Retry rate", - "range": true, - "refId": "A" - } - ], - "title": "Request retry rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 9 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "(1 - rate(request_failure_count[1m]) / rate(request_count[1m])) * 100", - "legendFormat": "Success rate over time", - "range": true, - "refId": "A" - } - ], - "title": "Success rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - 
"fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 8, - "y": 9 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_latency_sum[1m]) / rate(request_latency_count[1m])", - "legendFormat": "Average request latency", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(queue_latency_sum[1m]) / rate(queue_latency_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Average queue latency", - "range": true, - "refId": "B" - } - ], - "title": "Latency", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 19, - "panels": [], - "title": "Workers", - "type": "row" - }, - { - "datasource": { - "type": 
"prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 0, - "y": 18 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_count", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total workers", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_busy", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Busy workers", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Workers", - "type": "timeseries" - }, - { - "datasource": 
{ - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "fixed" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMax": 3, - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 7, - "y": 18 - }, - "id": 21, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "worker_unhealthy", - "legendFormat": "{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Unhealthy workers", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - 
"gridPos": { - "h": 8, - "w": 7, - "x": 14, - "y": 18 - }, - "id": 12, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(worker_request_count[1m])", - "legendFormat": "{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Requests per worker", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 20, - "panels": [], - "title": "Queue", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 15 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 27 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": 
[ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "queue_size", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Queue size", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Queue size", - "type": "timeseries" - } - ], - "preload": false, - "refresh": "5s", - "schemaVersion": 40, - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "2025-03-31T19:02:51.110Z", - "to": "2025-03-31T19:04:03.015Z" - }, - "timepicker": {}, - "timezone": "browser", - "title": "tx_prover", - "uid": "be7bobzl5fr40f", - "version": 6, - "weekStart": "" -} diff --git a/bin/remote-prover/prometheus.yml b/bin/remote-prover/prometheus.yml deleted file mode 100644 index 817e92f244..0000000000 --- a/bin/remote-prover/prometheus.yml +++ /dev/null @@ -1,16 +0,0 @@ -global: - scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. - evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. - # scrape_timeout is set to the global default (10s). - -# A scrape configuration containing exactly one endpoint to scrape: -scrape_configs: - # The job name is a label that is used to group targets in the Prometheus UI. - # It can be any string. - - job_name: "remote_prover" - # Here you need to specify the address of the Prometheus service endpoint in the proxy - # We use the default port for Prometheus, but it need to be changed if you use a different host - # or port. In case of using Prometheus in a docker container, you can use the - # `host.docker.internal` address to access the host machine. 
- static_configs: - - targets: ["127.0.0.1:6192"] diff --git a/bin/remote-prover/src/api/mod.rs b/bin/remote-prover/src/api/mod.rs deleted file mode 100644 index 4aee8807b4..0000000000 --- a/bin/remote-prover/src/api/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use tokio::net::TcpListener; - -use crate::generated::api_server::ApiServer; -use crate::generated::worker_status_api_server::WorkerStatusApiServer; - -pub(crate) mod prover; -mod status; - -pub use prover::{ProofType, ProverRpcApi}; - -pub struct RpcListener { - pub api_service: ApiServer, - pub status_service: WorkerStatusApiServer, - pub listener: TcpListener, -} - -impl RpcListener { - pub fn new(listener: TcpListener, proof_type: ProofType) -> Self { - let prover_rpc_api = ProverRpcApi::new(proof_type); - let status_rpc_api = status::StatusRpcApi::new(proof_type); - let api_service = ApiServer::new(prover_rpc_api); - let status_service = WorkerStatusApiServer::new(status_rpc_api); - Self { api_service, status_service, listener } - } -} diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs deleted file mode 100644 index 24a70f7312..0000000000 --- a/bin/remote-prover/src/api/prover.rs +++ /dev/null @@ -1,355 +0,0 @@ -use miden_block_prover::LocalBlockProver; -use miden_node_proto::BlockProofRequest; -use miden_node_utils::ErrorReport; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::TransactionInputs; -use miden_protocol::utils::Serializable; -use miden_tx::LocalTransactionProver; -use miden_tx_batch_prover::LocalBatchProver; -use serde::{Deserialize, Serialize}; -use tokio::sync::Mutex; -use tonic::{Request, Response, Status}; -use tracing::{info, instrument}; - -use crate::COMPONENT; -use crate::generated::api_server::Api as ProverApi; -use crate::generated::{self as proto}; - -/// Specifies the type of proof supported by the remote prover. 
-#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] -pub enum ProofType { - #[default] - Transaction, - Batch, - Block, -} - -impl std::fmt::Display for ProofType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ProofType::Transaction => write!(f, "transaction"), - ProofType::Batch => write!(f, "batch"), - ProofType::Block => write!(f, "block"), - } - } -} - -impl std::str::FromStr for ProofType { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "transaction" => Ok(ProofType::Transaction), - "batch" => Ok(ProofType::Batch), - "block" => Ok(ProofType::Block), - _ => Err(format!("Invalid proof type: {s}")), - } - } -} - -/// The prover for the remote prover. -/// -/// This enum is used to store the prover for the remote prover. -/// Only one prover is enabled at a time. -enum Prover { - Transaction(Mutex), - Batch(Mutex), - Block(Mutex), -} - -impl Prover { - fn new(proof_type: ProofType) -> Self { - match proof_type { - ProofType::Transaction => { - info!(target: COMPONENT, proof_type = ?proof_type, "Transaction prover initialized"); - Self::Transaction(Mutex::new(LocalTransactionProver::default())) - }, - ProofType::Batch => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Batch prover initialized"); - Self::Batch(Mutex::new(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - ProofType::Block => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Block prover initialized"); - Self::Block(Mutex::new(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - } - } -} - -pub struct ProverRpcApi { - prover: Prover, -} - -impl ProverRpcApi { - pub fn new(proof_type: ProofType) -> Self { - let prover = Prover::new(proof_type); - - Self { prover } - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_tx", - 
skip_all, - ret(level = "debug"), - fields(request_id = %request_id, transaction_id = tracing::field::Empty), - err - )] - pub async fn prove_tx( - &self, - tx_inputs: TransactionInputs, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Transaction(prover) = &self.prover else { - return Err(Status::unimplemented("Transaction prover is not enabled")); - }; - - let locked_prover = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))?; - - // Add a small delay to simulate longer proving time for testing - #[cfg(test)] - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - let proof = locked_prover.prove(tx_inputs).map_err(internal_error)?; - - // Record the transaction_id in the current tracing span - let transaction_id = proof.id(); - tracing::Span::current().record("transaction_id", tracing::field::display(&transaction_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proof.to_bytes() })) - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_batch", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, batch_id = tracing::field::Empty), - err - )] - pub fn prove_batch( - &self, - proposed_batch: ProposedBatch, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Batch(prover) = &self.prover else { - return Err(Status::unimplemented("Batch prover is not enabled")); - }; - - let proven_batch = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(proposed_batch) - .map_err(internal_error)?; - - // Record the batch_id in the current tracing span - let batch_id = proven_batch.id(); - tracing::Span::current().record("batch_id", tracing::field::display(&batch_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proven_batch.to_bytes() })) - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_block", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, block_id = tracing::field::Empty), - err - )] - pub fn prove_block( - &self, - proof_request: BlockProofRequest, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Block(prover) = &self.prover else { - return Err(Status::unimplemented("Block prover is not enabled")); - }; - let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - - // Record the commitment of the block in the current tracing span. - let block_id = block_header.commitment(); - tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - - let block_proof = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(tx_batches, block_header, block_inputs) - .map_err(internal_error)?; - - Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) - } -} - -#[async_trait::async_trait] -impl ProverApi for ProverRpcApi { - #[instrument( - target = COMPONENT, - name = "remote_prover.prove", - skip_all, - ret(level = "debug"), - fields(request_id = tracing::field::Empty), - err - )] - async fn prove( - &self, - request: Request, - ) -> Result, tonic::Status> { - // Extract X-Request-ID header for trace correlation - let request_id = request - .metadata() - .get("x-request-id") - .and_then(|v| v.to_str().ok()) - .unwrap_or("unknown") - .to_string(); // Convert to owned string to avoid lifetime issues - - // Record the request_id in the current tracing span - tracing::Span::current().record("request_id", &request_id); - - // Extract the proof type and payload - let proof_request = request.into_inner(); - let proof_type = proof_request.proof_type(); - - match proof_type { - proto::remote_prover::ProofType::Transaction => { - let tx_inputs = proof_request.try_into().map_err(invalid_argument)?; - self.prove_tx(tx_inputs, &request_id).await - }, - proto::remote_prover::ProofType::Batch => { - let proposed_batch = proof_request.try_into().map_err(invalid_argument)?; - self.prove_batch(proposed_batch, &request_id) - }, - proto::remote_prover::ProofType::Block => { - let proof_request = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proof_request, &request_id) - }, - } - } -} - -// UTILITIES -// ================================================================================================ - -fn internal_error(err: E) -> Status { - Status::internal(err.as_report()) -} - -fn invalid_argument(err: E) -> Status { - Status::invalid_argument(err.as_report()) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod test { - use std::time::Duration; - - use 
miden_node_utils::cors::cors_for_grpc_web_layer; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::note::NoteType; - use miden_protocol::testing::account_id::{ - ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_SENDER, - }; - use miden_protocol::transaction::ProvenTransaction; - use miden_testing::{Auth, MockChainBuilder}; - use miden_tx::utils::Serializable; - use tokio::net::TcpListener; - use tonic::Request; - use tonic_web::GrpcWebLayer; - - use crate::api::ProverRpcApi; - use crate::generated::api_client::ApiClient; - use crate::generated::api_server::ApiServer; - use crate::generated::{self as proto}; - - #[tokio::test(flavor = "multi_thread", worker_threads = 3)] - async fn test_prove_transaction() { - // Start the server in the background - let listener = TcpListener::bind("127.0.0.1:50052").await.unwrap(); - - let proof_type = proto::remote_prover::ProofType::Transaction; - - let api_service = ApiServer::new(ProverRpcApi::new(proof_type.into())); - - // Spawn the server as a background task - tokio::spawn(async move { - tonic::transport::Server::builder() - .accept_http1(true) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .add_service(api_service) - .serve_with_incoming(tokio_stream::wrappers::TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - // Give the server some time to start - tokio::time::sleep(Duration::from_secs(1)).await; - - // Set up a gRPC client to send the request - let mut client = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - let mut client_2 = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - - // Create a mock transaction to send to the server - let mut mock_chain_builder = MockChainBuilder::new(); - let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); - - let fungible_asset_1: Asset = - FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) - .unwrap() - .into(); - let note_1 = mock_chain_builder 
- .add_p2id_note( - ACCOUNT_ID_SENDER.try_into().unwrap(), - account.id(), - &[fungible_asset_1], - NoteType::Private, - ) - .unwrap(); - - let mock_chain = mock_chain_builder.build().unwrap(); - - let tx_context = mock_chain - .build_tx_context(account.id(), &[note_1.id()], &[]) - .unwrap() - .build() - .unwrap(); - - let executed_transaction = Box::pin(tx_context.execute()).await.unwrap(); - let tx_inputs = executed_transaction.tx_inputs(); - - let request_1 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - let request_2 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - // Send both requests concurrently - let (response_1, response_2) = - tokio::join!(client.prove(request_1), client_2.prove(request_2)); - - // Check the success response - assert!(response_1.is_ok() || response_2.is_ok()); - - // Check the failure response - assert!(response_1.is_err() || response_2.is_err()); - - let response_success = response_1.or(response_2).unwrap(); - - // Cast into a ProvenTransaction - let _proven_transaction: ProvenTransaction = - response_success.into_inner().try_into().expect("Failed to convert response"); - } -} diff --git a/bin/remote-prover/src/commands/mod.rs b/bin/remote-prover/src/commands/mod.rs deleted file mode 100644 index 13b21d8a50..0000000000 --- a/bin/remote-prover/src/commands/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use proxy::StartProxy; -use tracing::instrument; -use update_workers::{AddWorkers, RemoveWorkers, UpdateWorkers}; -use worker::StartWorker; - -pub mod proxy; -pub mod update_workers; -pub mod worker; - -pub(crate) const PROXY_HOST: &str = "0.0.0.0"; - -#[derive(Debug, Parser)] -pub(crate) struct 
ProxyConfig { - /// Interval at which the system polls for available workers to assign new - /// tasks. - #[arg(long, default_value = "20ms", env = "MRP_AVAILABLE_WORKERS_POLLING_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) available_workers_polling_interval: Duration, - /// Maximum time to establish a connection. - #[arg(long, default_value = "10s", env = "MRP_CONNECTION_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) connection_timeout: Duration, - /// Health check interval. - #[arg(long, default_value = "10s", env = "MRP_HEALTH_CHECK_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) health_check_interval: Duration, - /// Maximum number of items in the queue. - #[arg(long, default_value = "10", env = "MRP_MAX_QUEUE_ITEMS")] - pub(crate) max_queue_items: usize, - /// Maximum number of requests per second per IP address. - #[arg(long, default_value = "5", env = "MRP_MAX_REQ_PER_SEC")] - pub(crate) max_req_per_sec: isize, - /// Maximum number of retries per request. - #[arg(long, default_value = "1", env = "MRP_MAX_RETRIES_PER_REQUEST")] - pub(crate) max_retries_per_request: usize, - /// Metrics configurations. - #[command(flatten)] - pub(crate) metrics_config: MetricsConfig, - /// Port of the proxy. - #[arg(long, default_value = "8082", env = "MRP_PORT")] - pub(crate) port: u16, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. - #[arg(long, default_value = "100s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, - /// Control port. - /// - /// Port used to add and remove workers from the proxy. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - pub(crate) control_port: u16, - /// Supported proof type. - /// - /// The type of proof the proxy will handle. Only workers that support the same proof type - /// will be able to connect to the proxy. 
- #[arg(long, default_value = "transaction", env = "MRP_PROOF_TYPE")] - pub(crate) proof_type: ProofType, - /// Grace period before starting the final step of the graceful shutdown after - /// signaling shutdown. - #[arg(long, default_value = "20s", env = "MRP_GRACE_PERIOD", value_parser = humantime::parse_duration)] - pub(crate) grace_period: std::time::Duration, - /// Timeout of the final step for the graceful shutdown. - #[arg(long, default_value = "5s", env = "MRP_GRACEFUL_SHUTDOWN_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) graceful_shutdown_timeout: std::time::Duration, -} - -#[derive(Debug, Clone, clap::Parser)] -pub struct MetricsConfig { - /// Port for Prometheus-compatible metrics - /// If specified, metrics will be enabled on this port. If not specified, metrics will be - /// disabled. - #[arg(long, env = "MRP_METRICS_PORT")] - pub metrics_port: Option, -} - -/// Root CLI struct -#[derive(Parser, Debug)] -#[command( - name = "miden-remote-prover", - about = "A stand-alone service for proving Miden transactions.", - version, - rename_all = "kebab-case" -)] -pub struct Cli { - #[command(subcommand)] - action: Command, -} - -/// CLI actions -#[derive(Debug, Parser)] -pub enum Command { - /// Starts the workers with the configuration defined in the command. - StartWorker(StartWorker), - /// Starts the proxy. - StartProxy(StartProxy), - /// Adds workers to the proxy. - /// - /// This command will make a request to the proxy to add the specified workers. - AddWorkers(AddWorkers), - /// Removes workers from the proxy. - /// - /// This command will make a request to the proxy to remove the specified workers. 
- RemoveWorkers(RemoveWorkers), -} - -/// CLI entry point -impl Cli { - #[instrument(target = COMPONENT, name = "cli.execute", skip_all, ret(level = "info"), err)] - pub async fn execute(&self) -> anyhow::Result<()> { - match &self.action { - // For the `StartWorker` command, we need to create a new runtime and run the worker - Command::StartWorker(worker_init) => worker_init.execute().await, - Command::StartProxy(proxy_init) => proxy_init.execute().await, - Command::AddWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - Command::RemoveWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - } - } -} diff --git a/bin/remote-prover/src/commands/proxy.rs b/bin/remote-prover/src/commands/proxy.rs deleted file mode 100644 index e9266c948b..0000000000 --- a/bin/remote-prover/src/commands/proxy.rs +++ /dev/null @@ -1,129 +0,0 @@ -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::error::RemoteProverError; -use pingora::apps::HttpServerOptions; -use pingora::prelude::{Opt, background_service}; -use pingora::server::Server; -use pingora::server::configuration::ServerConf; -use pingora::services::listening::Service; -use pingora_proxy::http_proxy_service; -use tracing::{info, warn}; - -use super::ProxyConfig; -use crate::commands::PROXY_HOST; -use crate::proxy::update_workers::LoadBalancerUpdateService; -use crate::proxy::{LoadBalancer, LoadBalancerState}; -use crate::utils::check_port_availability; - -/// Starts the proxy. -/// -/// Example: `miden-remote-prover start-proxy --workers 0.0.0.0:8080,127.0.0.1:9090` -#[derive(Debug, Parser)] -pub struct StartProxy { - /// List of workers as host:port strings. - /// - /// Example: `127.0.0.1:8080,192.168.1.1:9090` - #[arg(long, env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Proxy configurations. 
- #[command(flatten)] - proxy_config: ProxyConfig, -} - -impl StartProxy { - /// Starts the proxy using the configuration defined in the command. - /// - /// This method will start a proxy with each worker passed as command argument as a backend, - /// using the configurations passed as options for the commands or the equivalent environmental - /// variables. - /// - /// # Errors - /// Returns an error in the following cases: - /// - The backend cannot be created. - /// - The Pingora configuration fails. - /// - The server cannot be started. - #[tracing::instrument(target = COMPONENT, name = "proxy.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - // Check if all required ports are available - check_port_availability(self.proxy_config.port, "Proxy")?; - check_port_availability(self.proxy_config.control_port, "Control")?; - - // First, check if the metrics port is specified (metrics enabled) - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - check_port_availability(metrics_port, "Metrics")?; - } - - let mut conf = ServerConf::new().ok_or(RemoteProverError::PingoraConfigFailed( - "Failed to create server conf".to_string(), - ))?; - conf.grace_period_seconds = Some(self.proxy_config.grace_period.as_secs()); - conf.graceful_shutdown_timeout_seconds = - Some(self.proxy_config.graceful_shutdown_timeout.as_secs()); - - let mut server = Server::new_with_opt_and_conf(Some(Opt::default()), conf); - - server.bootstrap(); - - if self.workers.is_empty() { - warn!(target: COMPONENT, "Starting proxy without any workers"); - } else { - info!(target: COMPONENT, - worker_count = %self.workers.len(), - workers = ?self.workers, - "Proxy starting with workers" - ); - } - - let worker_lb = LoadBalancerState::new(self.workers.clone(), &self.proxy_config).await?; - - let health_check_service = background_service("health_check", worker_lb); - - let worker_lb = health_check_service.task(); - - let updater_service = 
LoadBalancerUpdateService::new(worker_lb.clone()); - - let mut update_workers_service = - Service::new("update_workers".to_string(), updater_service); - update_workers_service - .add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.control_port).as_str()); - - // Set up the load balancer - let mut lb = http_proxy_service(&server.configuration, LoadBalancer(worker_lb.clone())); - - lb.add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.port).as_str()); - info!(target: COMPONENT, - endpoint = %format!("{}:{}", PROXY_HOST, self.proxy_config.port), - "Proxy service listening" - ); - let logic = lb - .app_logic_mut() - .ok_or(RemoteProverError::PingoraConfigFailed("app logic not found".to_string()))?; - let mut http_server_options = HttpServerOptions::default(); - - // Enable HTTP/2 for plaintext - http_server_options.h2c = true; - logic.server_options = Some(http_server_options); - - // Enable Prometheus metrics if metrics_port is specified - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - let metrics_addr = format!("{PROXY_HOST}:{metrics_port}"); - info!(target: COMPONENT, - endpoint = %metrics_addr, - "Metrics service initialized" - ); - let mut prometheus_service = - pingora::services::listening::Service::prometheus_http_service(); - prometheus_service.add_tcp(&metrics_addr); - server.add_service(prometheus_service); - } else { - info!(target: COMPONENT, "Metrics service disabled"); - } - - server.add_service(health_check_service); - server.add_service(update_workers_service); - server.add_service(lb); - tokio::task::spawn_blocking(|| server.run_forever()).await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/commands/update_workers.rs b/bin/remote-prover/src/commands/update_workers.rs deleted file mode 100644 index c661a39dde..0000000000 --- a/bin/remote-prover/src/commands/update_workers.rs +++ /dev/null @@ -1,126 +0,0 @@ -use anyhow::Context; -use clap::Parser; -use reqwest::Client; -use serde::{Deserialize, Serialize}; - 
-use crate::commands::PROXY_HOST; - -// ADD WORKERS -// ================================================================================================ - -/// Add workers to the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct AddWorkers { - /// Workers to be added to the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// REMOVE WORKERS -// ================================================================================================ - -/// Remove workers from the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct RemoveWorkers { - /// Workers to be removed from the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// UPDATE WORKERS -// ================================================================================================ - -/// Action to perform on the workers -#[derive(clap::ValueEnum, Clone, Debug, Serialize, Deserialize)] -pub enum Action { - Add, - Remove, -} - -/// Update workers in the proxy performing the specified [`Action`] -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct UpdateWorkers { - pub action: Action, - pub workers: Vec, - pub control_port: u16, -} - -impl UpdateWorkers { - /// Makes a requests to the update workers endpoint to update the workers. - /// - /// It works by sending a GET request to the proxy with the query parameters. The query - /// parameters are serialized from the struct fields. 
- /// - /// It uses the URL defined in the env vars or passed as parameter for the proxy. - /// - /// The request will return the new number of workers in the X-Worker-Count header. - /// - /// # Errors - /// - If the query parameters cannot be serialized. - /// - If the request fails. - /// - If the status code is not successful. - /// - If the X-Worker-Count header is missing. - pub async fn execute(&self) -> anyhow::Result<()> { - let query_params = serde_qs::to_string(&self)?; - - println!("Action: {:?}, with workers: {:?}", self.action, self.workers); - - // Create the full URL with fixed host "0.0.0.0" - let url = format!("http://{}:{}?{}", PROXY_HOST, self.control_port, query_params); - - // Create an HTTP/2 client - let client = Client::builder().http2_prior_knowledge().build()?; - - // Make the request - let response = client.get(url).send().await?; - - // Check status code - if !response.status().is_success() { - anyhow::bail!("Request failed with status code: {}", response.status()); - } - - // Read the X-Worker-Count header - let workers_count = response - .headers() - .get("X-Worker-Count") - .context("Missing X-Worker-Count header")? 
- .to_str()?; - - println!("New number of workers: {workers_count}"); - - Ok(()) - } -} - -// CONVERSIONS -// ================================================================================================ - -impl From for UpdateWorkers { - fn from(remove_workers: RemoveWorkers) -> Self { - UpdateWorkers { - action: Action::Remove, - workers: remove_workers.workers, - control_port: remove_workers.control_port, - } - } -} - -impl From for UpdateWorkers { - fn from(add_workers: AddWorkers) -> Self { - UpdateWorkers { - action: Action::Add, - workers: add_workers.workers, - control_port: add_workers.control_port, - } - } -} diff --git a/bin/remote-prover/src/commands/worker.rs b/bin/remote-prover/src/commands/worker.rs deleted file mode 100644 index 1417e5baa4..0000000000 --- a/bin/remote-prover/src/commands/worker.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_node_utils::cors::cors_for_grpc_web_layer; -use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; -use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::{ProofType, RpcListener}; -use miden_remote_prover::generated::api_server::ApiServer; -use tokio::net::TcpListener; -use tokio_stream::wrappers::TcpListenerStream; -use tonic_health::server::health_reporter; -use tonic_web::GrpcWebLayer; -use tower_http::trace::TraceLayer; -use tracing::{info, instrument}; - -/// Starts a worker. -#[derive(Debug, Parser)] -pub struct StartWorker { - /// Use localhost (127.0.0.1) instead of 0.0.0.0 - #[arg(long, env = "MRP_WORKER_LOCALHOST")] - localhost: bool, - /// The port of the worker - #[arg(long, default_value = "50051", env = "MRP_WORKER_PORT")] - port: u16, - /// The type of proof that the worker will be handling - #[arg(long, env = "MRP_WORKER_PROOF_TYPE")] - proof_type: ProofType, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. 
- #[arg(long, default_value = "60s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, -} - -impl StartWorker { - /// Starts a worker. - /// - /// This method receives the port from the CLI and starts a worker on that port. - /// The host will be 127.0.0.1 if --localhost is specified, otherwise 0.0.0.0. - /// In case that the port is not provided, it will default to `50051`. - /// - /// The worker includes a health reporter that will mark the service as serving, following the - /// [gRPC health checking protocol]( - /// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - #[instrument(target = COMPONENT, name = "worker.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - let host = if self.localhost { "127.0.0.1" } else { "0.0.0.0" }; - let worker_addr = format!("{}:{}", host, self.port); - let rpc = RpcListener::new(TcpListener::bind(&worker_addr).await?, self.proof_type); - - let server_addr = rpc.listener.local_addr()?; - info!(target: COMPONENT, - endpoint = %server_addr, - proof_type = ?self.proof_type, - host = %host, - port = %self.port, - "Worker server initialized and listening" - ); - - // Create a health reporter - let (health_reporter, health_service) = health_reporter(); - - // Mark the service as serving - health_reporter.set_serving::>().await; - - tonic::transport::Server::builder() - .accept_http1(true) - .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) - .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .timeout(self.timeout) - .add_service(rpc.api_service) - .add_service(rpc.status_service) - .add_service(health_service) - .serve_with_incoming(TcpListenerStream::new(rpc.listener)) - .await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/error.rs b/bin/remote-prover/src/error.rs deleted file mode 100644 index 16638c04ce..0000000000 --- a/bin/remote-prover/src/error.rs 
+++ /dev/null @@ -1,27 +0,0 @@ -use axum::http::uri::InvalidUri; -use thiserror::Error; - -// TX PROVER SERVICE ERROR -// ================================================================================================ - -#[derive(Debug, Error)] -pub enum RemoteProverError { - #[error("invalid uri {1}")] - InvalidURI(#[source] InvalidUri, String), - #[error("failed to connect to worker {1}")] - ConnectionFailed(#[source] tonic::transport::Error, String), - #[error("failed to create backend for worker")] - BackendCreationFailed(#[source] Box), - #[error("failed to setup pingora: {0}")] - PingoraConfigFailed(String), - #[error("failed to parse int: {0}")] - ParseError(#[from] std::num::ParseIntError), - #[error("port {1} is already in use: {0}")] - PortAlreadyInUse(#[source] std::io::Error, u16), -} - -impl From for String { - fn from(err: RemoteProverError) -> Self { - err.to_string() - } -} diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs deleted file mode 100644 index e1bdc64069..0000000000 --- a/bin/remote-prover/src/generated/conversions.rs +++ /dev/null @@ -1,90 +0,0 @@ -// CONVERSIONS -// ================================================================================================ - -use miden_node_proto::BlockProofRequest; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; -use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; - -use crate::api::ProofType; -use crate::generated as proto; - -impl From for proto::Proof { - fn from(value: ProvenTransaction) -> Self { - proto::Proof { payload: value.to_bytes() } - } -} - -impl TryFrom for ProvenTransaction { - type Error = DeserializationError; - - fn try_from(response: proto::Proof) -> Result { - ProvenTransaction::read_from_bytes(&response.payload) - } -} - -impl TryFrom for TransactionInputs { - type Error = DeserializationError; - - fn try_from(request: 
proto::ProofRequest) -> Result { - TransactionInputs::read_from_bytes(&request.payload) - } -} - -impl TryFrom for ProposedBatch { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - ProposedBatch::read_from_bytes(&request.payload) - } -} - -impl TryFrom for BlockProofRequest { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - BlockProofRequest::read_from_bytes(&request.payload) - } -} - -impl From for proto::ProofType { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => proto::ProofType::Transaction, - ProofType::Batch => proto::ProofType::Batch, - ProofType::Block => proto::ProofType::Block, - } - } -} - -impl From for ProofType { - fn from(value: proto::ProofType) -> Self { - match value { - proto::ProofType::Transaction => ProofType::Transaction, - proto::ProofType::Batch => ProofType::Batch, - proto::ProofType::Block => ProofType::Block, - } - } -} - -impl TryFrom for ProofType { - type Error = String; - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(ProofType::Transaction), - 1 => Ok(ProofType::Batch), - 2 => Ok(ProofType::Block), - _ => Err(format!("unknown ProverType value: {value}")), - } - } -} - -impl From for i32 { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => 0, - ProofType::Batch => 1, - ProofType::Block => 2, - } - } -} diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index eb7d89309e..c24a38e353 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -1,7 +1,8 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] -pub mod remote_prover; -mod conversions; - +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/remote_prover.rs")); +} pub use 
remote_prover::*; diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs deleted file mode 100644 index b504804c3e..0000000000 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ /dev/null @@ -1,1003 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. 
- #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. 
- /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Generates a proof for the requested payload. 
- async fn prove( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.Api/Prove" => { - #[allow(non_camel_case_types)] - struct ProveSvc(pub Arc); - impl tonic::server::UnaryService - for ProveSvc { - type Response = super::Proof; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::prove(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ProveSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - 
(tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod proxy_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ProxyStatusApiServer. - #[async_trait] - pub trait ProxyStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the proxy. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ProxyStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ProxyStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ProxyStatusApiServer - where - T: ProxyStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.ProxyStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::ProxyStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers 
- .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ProxyStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.ProxyStatusApi"; - impl tonic::server::NamedService for ProxyStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod worker_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with WorkerStatusApiServer. - #[async_trait] - pub trait WorkerStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the worker. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct WorkerStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl WorkerStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for WorkerStatusApiServer - where - T: WorkerStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.WorkerStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::WorkerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - 
headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for WorkerStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.WorkerStatusApi"; - impl tonic::server::NamedService for WorkerStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/bin/remote-prover/src/lib.rs b/bin/remote-prover/src/lib.rs deleted file mode 100644 index 0388ae685e..0000000000 --- a/bin/remote-prover/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod api; -pub mod error; -pub mod generated; - -/// Component identifier for structured logging and tracing -pub const COMPONENT: &str = "miden-remote-prover"; diff --git a/bin/remote-prover/src/main.rs b/bin/remote-prover/src/main.rs index d4fc42f6da..e445d80f14 100644 --- a/bin/remote-prover/src/main.rs +++ b/bin/remote-prover/src/main.rs @@ -1,22 +1,20 @@ +use anyhow::Context; use clap::Parser; use miden_node_utils::logging::{OpenTelemetry, setup_tracing}; -use miden_remote_prover::COMPONENT; use tracing::info; -use crate::commands::Cli; +mod generated; +mod server; -pub(crate) mod commands; -pub(crate) mod proxy; -pub(crate) mod utils; +const COMPONENT: &str = "miden-prover"; #[tokio::main] async fn main() -> anyhow::Result<()> { let _otel_guard = setup_tracing(OpenTelemetry::Enabled)?; info!(target: COMPONENT, "Tracing initialized"); - // read command-line args - let cli = Cli::parse(); + let (handle, _port) = + 
server::Server::parse().spawn().await.context("failed to spawn server")?; - // execute cli action - cli.execute().await + handle.await.context("proof server panicked").flatten() } diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs deleted file mode 100644 index b583c09827..0000000000 --- a/bin/remote-prover/src/proxy/health_check.rs +++ /dev/null @@ -1,70 +0,0 @@ -use miden_remote_prover::COMPONENT; -use pingora::prelude::sleep; -use pingora::server::ShutdownWatch; -use pingora::services::background::BackgroundService; -use tonic::async_trait; -use tracing::{debug_span, error}; - -use super::LoadBalancerState; - -/// Implement the [`BackgroundService`] trait for the [`LoadBalancerState`]. -/// -/// A [`BackgroundService`] can be run as part of a Pingora application to add supporting logic that -/// exists outside of the request/response lifecycle. -/// -/// We use this implementation to periodically check the health of the workers and update the list -/// of available workers. -#[async_trait] -impl BackgroundService for LoadBalancerState { - /// Starts the health check background service. - /// - /// This function is called when the Pingora server tries to start all the services. The - /// background service can return at anytime or wait for the `shutdown` signal. - /// - /// The health check background service will periodically check the health of the workers - /// using the gRPC status endpoint. If a worker is not healthy, it will be removed from - /// the list of available workers. - /// - /// # Errors - /// - If the worker has an invalid URI. 
- async fn start(&self, shutdown: ShutdownWatch) { - Box::pin(async move { - loop { - // Check if the shutdown signal has been received - { - if *shutdown.borrow() { - break; - } - } - - // Create a new spawn to perform the health check - let span = debug_span!(target: COMPONENT, "proxy.health_check"); - let _guard = span.enter(); - { - let mut workers = self.workers.write().await; - - for worker in workers.iter_mut() { - let status_result = worker.check_status(self.supported_proof_type).await; - - if let Err(ref reason) = status_result { - error!( - err = %reason, - worker.name = worker.name(), - "Worker failed health check" - ); - } - - worker.update_status(status_result); - } - } - - // Update the status cache with current worker status - self.update_status_cache().await; - - // Sleep for the defined interval before the next health check - sleep(self.health_check_interval).await; - } - }) - .await; - } -} diff --git a/bin/remote-prover/src/proxy/metrics.rs b/bin/remote-prover/src/proxy/metrics.rs deleted file mode 100644 index 9b5c579d9c..0000000000 --- a/bin/remote-prover/src/proxy/metrics.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::sync::LazyLock; - -use prometheus::{ - Histogram, - IntCounter, - IntCounterVec, - IntGauge, - register_histogram, - register_int_counter, - register_int_counter_vec, - register_int_gauge, -}; - -// SAFETY: The `unwrap` calls here are safe because: -// 1. The metrics being registered (gauges, counters, histograms) use hardcoded names and -// descriptions, which are guaranteed not to conflict within the application. -// 2. Registration errors occur only if there is a naming conflict, which is not possible in this -// context due to controlled metric definitions. -// 3. Any changes to metric names or types should be carefully reviewed to avoid conflicts. 
- -// QUEUE METRICS -// ================================================================================================ - -pub static QUEUE_SIZE: LazyLock = - LazyLock::new(|| register_int_gauge!("queue_size", "Number of requests in the queue").unwrap()); -pub static QUEUE_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "queue_latency", - "Time (in seconds) requests spend in the queue", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); -pub static QUEUE_DROP_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("queue_drop_count", "Number of requests dropped due to a full queue") - .unwrap() -}); - -// WORKER METRICS -// ================================================================================================ - -pub static WORKER_COUNT: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_count", "Total number of workers").unwrap()); -pub static WORKER_UNHEALTHY: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_unhealthy", - "Number of times that each worker was registered as unhealthy", - &["worker_id"] - ) - .unwrap() -}); -pub static WORKER_BUSY: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_busy", "Number of busy workers").unwrap()); -pub static WORKER_REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_request_count", - "Number of requests processed by each worker", - &["worker_id"] - ) - .unwrap() -}); - -// REQUEST METRICS -// ================================================================================================ - -pub static REQUEST_FAILURE_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_failure_count", "Number of failed requests").unwrap() -}); -pub static REQUEST_RETRIES: LazyLock = LazyLock::new(|| { - register_int_counter!("request_retries", "Number of request retries").unwrap() -}); -pub static REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_count", "Number of requests 
processed").unwrap() -}); -pub static REQUEST_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "request_latency", - "Time (in seconds) requests take to process", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); - -// RATE LIMITING METRICS -// ================================================================================================ - -pub static RATE_LIMITED_REQUESTS: LazyLock = LazyLock::new(|| { - register_int_counter!( - "rate_limited_requests", - "Number of requests blocked due to rate limiting" - ) - .unwrap() -}); -pub static RATE_LIMIT_VIOLATIONS: LazyLock = LazyLock::new(|| { - register_int_counter!("rate_limit_violations", "Number of rate limit violations by clients") - .unwrap() -}); diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs deleted file mode 100644 index 81290d73a9..0000000000 --- a/bin/remote-prover/src/proxy/mod.rs +++ /dev/null @@ -1,774 +0,0 @@ -use std::collections::VecDeque; -use std::sync::{Arc, LazyLock}; -use std::time::{Duration, Instant}; - -use async_trait::async_trait; -use bytes::Bytes; -use metrics::{ - QUEUE_LATENCY, - QUEUE_SIZE, - RATE_LIMIT_VIOLATIONS, - RATE_LIMITED_REQUESTS, - REQUEST_COUNT, - REQUEST_FAILURE_COUNT, - REQUEST_LATENCY, - REQUEST_RETRIES, - WORKER_BUSY, - WORKER_COUNT, - WORKER_REQUEST_COUNT, -}; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; -use miden_remote_prover::generated::remote_prover::{ProxyStatus, ProxyWorkerStatus}; -use pingora::http::ResponseHeader; -use pingora::prelude::*; -use pingora::protocols::Digest; -use pingora::upstreams::peer::{ALPN, Peer}; -use pingora_core::Result; -use pingora_core::upstreams::peer::HttpPeer; -use pingora_limits::rate::Rate; -use pingora_proxy::{FailToProxy, ProxyHttp, Session}; -use tokio::sync::RwLock; -use tracing::{Span, debug, error, info, info_span, warn}; -use uuid::Uuid; -use worker::Worker; - -use 
crate::commands::ProxyConfig; -use crate::commands::update_workers::{Action, UpdateWorkers}; -use crate::utils::{ - create_queue_full_response, - create_response_with_error_message, - create_too_many_requests_response, - write_grpc_response_to_session, -}; - -mod health_check; -pub mod metrics; -pub(crate) mod update_workers; -pub(crate) mod worker; - -// CONSTANTS -// ================================================================================================ - -const PROXY_STATUS_PATH: &str = "/remote_prover.ProxyStatusApi/Status"; - -// LOAD BALANCER STATE -// ================================================================================================ - -/// Load balancer that uses a round robin strategy -#[derive(Debug)] -pub struct LoadBalancerState { - workers: Arc>>, - timeout: Duration, - connection_timeout: Duration, - max_queue_items: usize, - max_retries_per_request: usize, - max_req_per_sec: isize, - available_workers_polling_interval: Duration, - health_check_interval: Duration, - supported_proof_type: ProofType, - status_cache_sender: tokio::sync::watch::Sender, - status_cache_receiver: tokio::sync::watch::Receiver, -} - -impl LoadBalancerState { - /// Create a new load balancer - /// - /// # Errors - /// Returns an error if: - /// - The worker cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "proxy.new_load_balancer", skip(initial_workers))] - pub(crate) async fn new( - initial_workers: Vec, - config: &ProxyConfig, - ) -> core::result::Result { - let mut workers: Vec = Vec::with_capacity(initial_workers.len()); - - let connection_timeout = config.connection_timeout; - let total_timeout = config.timeout; - - for worker_addr in initial_workers { - match Worker::new(worker_addr, connection_timeout, total_timeout).await { - Ok(w) => workers.push(w), - Err(e) => { - error!("Failed to create worker: {}", e); - }, - } - } - - info!("Workers created: {:?}", workers); - - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - RATE_LIMIT_VIOLATIONS.reset(); - RATE_LIMITED_REQUESTS.reset(); - REQUEST_RETRIES.reset(); - - let workers = Arc::new(RwLock::new(workers)); - let supported_proof_type = config.proof_type; - - // Build initial status for the cache - let initial_status = { - let workers_guard = workers.read().await; - build_proxy_status_response(&workers_guard, supported_proof_type) - }; - - // Create the status cache channel - let (status_cache_sender, status_cache_receiver) = - tokio::sync::watch::channel(initial_status); - - Ok(Self { - workers, - timeout: total_timeout, - connection_timeout, - max_queue_items: config.max_queue_items, - max_retries_per_request: config.max_retries_per_request, - max_req_per_sec: config.max_req_per_sec, - available_workers_polling_interval: config.available_workers_polling_interval, - health_check_interval: config.health_check_interval, - supported_proof_type, - status_cache_sender, - status_cache_receiver, - }) - } - - /// Gets an available worker and marks it as unavailable. - /// - /// If no worker is available, it will return None. 
- pub async fn pop_available_worker(&self) -> Option { - let mut available_workers = self.workers.write().await; - available_workers.iter_mut().find(|w| w.is_available()).map(|w| { - w.set_availability(false); - WORKER_BUSY.inc(); - w.clone() - }) - } - - /// Marks the given worker as available and moves it to the end of the list. - /// - /// If the worker is not in the list, it won't be added. - /// The worker is moved to the end of the list to avoid overloading since the selection of the - /// worker is done in order, causing the workers at the beginning of the list to be selected - /// more often. - pub async fn add_available_worker(&self, worker: Worker) { - let mut workers = self.workers.write().await; - if let Some(pos) = workers.iter().position(|w| *w == worker) { - // Remove the worker from its current position - let mut w = workers.remove(pos); - // Mark it as available - w.set_availability(true); - // Add it to the end of the list - workers.push(w); - } - } - - /// Updates the list of available workers based on the given action ("add" or "remove"). - /// - /// # Behavior - /// - /// ## Add Action - /// - If the worker exists in the current workers list, do nothing. - /// - Otherwise, add it and mark it as available. - /// - /// ## Remove Action - /// - If the worker exists in the current workers list, remove it. - /// - Otherwise, do nothing. - /// - /// # Errors - /// - If the worker cannot be created. 
- pub async fn update_workers( - &self, - update_workers: UpdateWorkers, - ) -> std::result::Result<(), RemoteProverError> { - let mut workers = self.workers.write().await; - info!("Current workers: {:?}", workers); - - let mut native_workers = Vec::new(); - - for worker_addr in update_workers.workers { - native_workers - .push(Worker::new(worker_addr, self.connection_timeout, self.timeout).await?); - } - - match update_workers.action { - Action::Add => { - for worker in native_workers { - if !workers.iter().any(|w| w == &worker) { - workers.push(worker); - } - } - }, - Action::Remove => { - for worker in native_workers { - workers.retain(|w| w != &worker); - } - }, - } - - info!("Workers updated: {:?}", workers); - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - - Ok(()) - } - - /// Get the total number of current workers. - pub async fn num_workers(&self) -> usize { - self.workers.read().await.len() - } - - /// Get the number of busy workers. 
- pub async fn num_busy_workers(&self) -> usize { - self.workers.read().await.iter().filter(|w| !w.is_available()).count() - } - - /// Get the cached status response - pub fn get_cached_status(&self) -> ProxyStatus { - self.status_cache_receiver.borrow().clone() - } - - /// Update the status cache with current worker status - pub async fn update_status_cache(&self) { - let workers = self.workers.read().await; - let new_status = build_proxy_status_response(&workers, self.supported_proof_type); - self.status_cache_sender.send(new_status).expect("Failed to send new status"); - } -} - -// UTILS -// ================================================================================================ - -/// Rate limiter -static RATE_LIMITER: LazyLock = LazyLock::new(|| Rate::new(Duration::from_secs(1))); - -// REQUEST QUEUE -// ================================================================================================ - -/// Request queue holds the list of requests that are waiting to be processed by the workers and -/// the time they were enqueued. -/// It is used to keep track of the order of the requests to then assign them to the workers. 
-pub struct RequestQueue { - queue: RwLock>, -} - -impl RequestQueue { - /// Create a new empty request queue - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - QUEUE_SIZE.set(0); - Self { queue: RwLock::new(VecDeque::new()) } - } - - /// Get the length of the queue - #[allow(clippy::len_without_is_empty)] - pub async fn len(&self) -> usize { - self.queue.read().await.len() - } - - /// Enqueue a request - pub async fn enqueue(&self, request_id: Uuid) { - QUEUE_SIZE.inc(); - let mut queue = self.queue.write().await; - queue.push_back((request_id, Instant::now())); - } - - /// Dequeue a request - pub async fn dequeue(&self) -> Option { - let mut queue = self.queue.write().await; - // If the queue was empty, the queue size does not change - if let Some((request_id, queued_time)) = queue.pop_front() { - QUEUE_SIZE.dec(); - QUEUE_LATENCY.observe(queued_time.elapsed().as_secs_f64()); - Some(request_id) - } else { - None - } - } - - /// Peek at the first request in the queue - pub async fn peek(&self) -> Option { - let queue = self.queue.read().await; - queue.front().copied().map(|(request_id, _)| request_id) - } -} - -/// Shared state. It keeps track of the order of the requests to then assign them to the workers. -static QUEUE: LazyLock = LazyLock::new(RequestQueue::new); - -// OPENTELEMETRY CONTEXT INJECTION -// ================================================================================================ - -/// Pingora `RequestHeader` injector for OpenTelemetry trace context propagation. -/// -/// This allows the proxy to inject trace context into headers that will be forwarded -/// to worker nodes, enabling proper parent-child trace relationships. 
-struct PingoraHeaderInjector<'a>(&'a mut pingora::http::RequestHeader); - -impl opentelemetry::propagation::Injector for PingoraHeaderInjector<'_> { - /// Set a key and value in the `RequestHeader` using pingora's API - fn set(&mut self, key: &str, value: String) { - // Use pingora's insert_header method which handles the proper header insertion - // Convert key to owned string to satisfy lifetime requirements - if let Err(e) = self.0.insert_header(key.to_string(), value) { - // Log error but don't fail the request if header injection fails - tracing::warn!(target: COMPONENT, header = %key, err = %e, "Failed to inject OpenTelemetry header"); - } - } -} - -// REQUEST CONTEXT -// ================================================================================================ - -/// Custom context for the request/response lifecycle -/// -/// We use this context to keep track of the number of tries for a request, the unique ID for the -/// request, the worker that will process the request, a span that will be used for traces along -/// the transaction execution, and a timer to track how long the request took. 
-#[derive(Debug)] -pub struct RequestContext { - /// Number of tries for the request - tries: usize, - /// Unique ID for the request - request_id: Uuid, - /// Worker that will process the request - worker: Option, - /// Parent span for the request - parent_span: Span, - /// Time when the request was created - created_at: Instant, -} - -impl RequestContext { - /// Create a new request context - fn new() -> Self { - let request_id = Uuid::new_v4(); - Self { - tries: 0, - request_id, - worker: None, - parent_span: info_span!(target: COMPONENT, "proxy.new_request", request_id = request_id.to_string()), - created_at: Instant::now(), - } - } - - /// Set the worker that will process the request - fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); - self.worker = Some(worker); - } -} - -// LOAD BALANCER -// ================================================================================================ - -/// Wrapper around the load balancer that implements the [`ProxyHttp`] trait -/// -/// This wrapper is used to implement the [`ProxyHttp`] trait for [`Arc`]. -/// This is necessary because we want to share the load balancer between the proxy server and the -/// health check background service. -#[derive(Debug)] -pub struct LoadBalancer(pub Arc); - -/// Implements load-balancing of incoming requests across a pool of workers. -/// -/// At the backend-level, a request lifecycle works as follows: -/// - When a new requests arrives, [`LoadBalancer::request_filter()`] method is called. In this -/// method we apply IP-based rate-limiting to the request and check if the request queue is full. -/// In this method we also handle the special case update workers request. -/// - Next, the [`Self::upstream_peer()`] method is called. We use it to figure out which worker -/// will process the request. Inside `upstream_peer()`, we add the request to the queue of -/// requests. 
Once the request gets to the front of the queue, we forward it to an available -/// worker. This step is also in charge of setting the SNI, timeouts, and enabling HTTP/2. -/// Finally, we establish a connection with the worker. -/// - Before sending the request to the upstream server and if the connection succeed, the -/// [`Self::upstream_request_filter()`] method is called. In this method, we ensure that the -/// correct headers are forwarded for gRPC requests. -/// - If the connection fails, the [`Self::fail_to_connect()`] method is called. In this method, we -/// retry the request [`self.max_retries_per_request`] times. -/// - Once the worker processes the request (either successfully or with a failure), -/// [`Self::logging()`] method is called. In this method, we log the request lifecycle and set the -/// worker as available. -#[async_trait] -impl ProxyHttp for LoadBalancer { - type CTX = RequestContext; - fn new_ctx(&self) -> Self::CTX { - RequestContext::new() - } - - /// Decide whether to filter the request or not. Also, handle the special case of the update - /// workers request or the proxy status request. - /// - /// The proxy status request is handled separately because it is used by the health check - /// service to check the status of the proxy and returns immediate response. - /// - /// Here we apply IP-based rate-limiting to the request. We also check if the queue is full. - /// - /// If the request is rate-limited, we return a 429 response. Otherwise, we return false. 
- #[tracing::instrument(name = "proxy.request_filter", parent = &ctx.parent_span, skip(session))] - async fn request_filter(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result - where - Self::CTX: Send + Sync, - { - // Extract the client address early - let client_addr = match session.client_addr() { - Some(addr) => addr.to_string(), - None => { - return create_response_with_error_message( - session.as_downstream_mut(), - "No socket address".to_string(), - ) - .await - .map(|_| true); - }, - }; - - Span::current().record("client_addr", client_addr.clone()); - - let path = session.downstream_session.req_header().uri.path(); - Span::current().record("path", path); - - // Check if the request is a grpc proxy status request by checking the path - if path == PROXY_STATUS_PATH { - let status = self.0.get_cached_status(); - return write_grpc_response_to_session(session, status).await.map(|_| true); - } - - // Increment the request count - REQUEST_COUNT.inc(); - - let user_id = Some(client_addr); - - // Retrieve the current window requests - let curr_window_requests = RATE_LIMITER.observe(&user_id, 1); - - // Rate limit the request - if curr_window_requests > self.0.max_req_per_sec { - RATE_LIMITED_REQUESTS.inc(); - - // Only count a violation the first time in a given window - if curr_window_requests == self.0.max_req_per_sec + 1 { - RATE_LIMIT_VIOLATIONS.inc(); - } - - return create_too_many_requests_response(session, self.0.max_req_per_sec) - .await - .map(|_| true); - } - - let queue_len = QUEUE.len().await; - - info!("New request with ID: {}", ctx.request_id); - info!("Queue length: {}", queue_len); - - // Check if the queue is full - if queue_len >= self.0.max_queue_items { - return create_queue_full_response(session).await.map(|_| true); - } - - Ok(false) - } - - /// Returns [`HttpPeer`] corresponding to the worker that will handle the current request. 
- /// - /// Here we enqueue the request and wait for it to be at the front of the queue and a worker - /// becomes available, then we dequeue the request and process it. We then set the SNI, - /// timeouts, and enable HTTP/2. - /// - /// Note that the request will be assigned a worker here, and the worker will be removed from - /// the list of available workers once it reaches the [`Self::logging`] method. - #[tracing::instrument(name = "proxy.upstream_peer", parent = &ctx.parent_span, skip(_session))] - async fn upstream_peer( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result> { - let request_id = ctx.request_id; - - // Add the request to the queue. - QUEUE.enqueue(request_id).await; - - // Wait for the request to be at the front of the queue - loop { - // The request is at the front of the queue. - if QUEUE.peek().await.expect("Queue should not be empty") != request_id { - continue; - } - - // Check if there is an available worker - if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); - ctx.set_worker(worker); - break; - } - debug!("All workers are busy"); - tokio::time::sleep(self.0.available_workers_polling_interval).await; - } - - // Remove the request from the queue - QUEUE.dequeue().await; - - // Set SNI - let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").name(), - false, - String::new(), - ); - let peer_opts = - http_peer.get_mut_peer_options().ok_or(Error::new(ErrorType::InternalError))?; - - // Timeout settings - peer_opts.total_connection_timeout = Some(self.0.timeout); - peer_opts.connection_timeout = Some(self.0.connection_timeout); - - // Enable HTTP/2 - peer_opts.alpn = ALPN::H2; - - let peer = Box::new(http_peer); - Ok(peer) - } - - /// Applies the necessary filters to the request before sending it to the upstream server. 
- /// - /// Here we ensure that the correct headers are forwarded for gRPC requests and inject - /// the X-Request-ID header and OpenTelemetry trace context for trace correlation between proxy - /// and worker. - /// - /// This method is called right after [`Self::upstream_peer()`] returns a [`HttpPeer`] and a - /// connection is established with the worker. - #[tracing::instrument(name = "proxy.upstream_request_filter", parent = &_ctx.parent_span, skip(_session))] - async fn upstream_request_filter( - &self, - _session: &mut Session, - upstream_request: &mut RequestHeader, - _ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - // Check if it's a gRPC request - if let Some(content_type) = upstream_request.headers.get("content-type") - && content_type == "application/grpc" - { - // Ensure the correct host and gRPC headers are forwarded - upstream_request.insert_header("content-type", "application/grpc")?; - } - - // Always inject X-Request-ID header for trace correlation - // This allows the worker traces to be correlated with the proxy traces - upstream_request.insert_header("x-request-id", _ctx.request_id.to_string())?; - - // Inject OpenTelemetry trace context for proper trace propagation - // This allows the worker trace to be a child of the proxy trace - { - use tracing_opentelemetry::OpenTelemetrySpanExt; - let ctx = tracing::Span::current().context(); - opentelemetry::global::get_text_map_propagator(|propagator| { - propagator.inject_context(&ctx, &mut PingoraHeaderInjector(upstream_request)); - }); - } - - Ok(()) - } - - /// Retry the request if the connection fails. 
- #[tracing::instrument(name = "proxy.fail_to_connect", parent = &ctx.parent_span, skip(_session))] - fn fail_to_connect( - &self, - _session: &mut Session, - peer: &HttpPeer, - ctx: &mut Self::CTX, - mut e: Box, - ) -> Box { - if ctx.tries > self.0.max_retries_per_request { - return e; - } - REQUEST_RETRIES.inc(); - ctx.tries += 1; - e.set_retry(true); - e - } - - /// Logs the request lifecycle in case that an error happened and sets the worker as available. - /// - /// This method is the last one in the request lifecycle, no matter if the request was - /// processed or not. - #[tracing::instrument(name = "proxy.logging", parent = &ctx.parent_span, skip(_session))] - async fn logging(&self, _session: &mut Session, e: Option<&Error>, ctx: &mut Self::CTX) - where - Self::CTX: Send + Sync, - { - if let Some(e) = e { - REQUEST_FAILURE_COUNT.inc(); - error!("Error: {:?}", e); - } - - // Mark the worker as available - if let Some(worker) = ctx.worker.take() { - self.0.add_available_worker(worker).await; - } - - REQUEST_LATENCY.observe(ctx.created_at.elapsed().as_secs_f64()); - - // Update the number of busy workers - WORKER_BUSY.set( - i64::try_from(self.0.num_busy_workers().await) - .expect("busy worker count greater than i64::MAX"), - ); - } - - // The following methods are a copy of the default implementation defined in the trait, but - // with tracing instrumentation. - // Pingora calls these methods to handle the request/response lifecycle internally and since - // the trait is defined in a different crate, we cannot add the tracing instrumentation there. - // We use the default implementation by implementing the method for our specific type, adding - // the tracing instrumentation and internally calling `ProxyHttp` methods. 
- // ============================================================================================ - #[tracing::instrument(name = "proxy.early_request_filter", parent = &ctx.parent_span, skip(_session))] - async fn early_request_filter( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.early_request_filter(_session, &mut ()).await - } - - #[tracing::instrument(name = "proxy.connected_to_upstream", parent = &ctx.parent_span, skip(_session, _sock, _reused, _peer, _fd, _digest))] - async fn connected_to_upstream( - &self, - _session: &mut Session, - _reused: bool, - _peer: &HttpPeer, - #[cfg(unix)] _fd: std::os::unix::io::RawFd, - #[cfg(windows)] _sock: std::os::windows::io::RawSocket, - _digest: Option<&Digest>, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .connected_to_upstream(_session, _reused, _peer, _fd, _digest, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.request_body_filter", parent = &ctx.parent_span, skip(session, body))] - async fn request_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .request_body_filter(session, body, end_of_stream, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.upstream_response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - fn upstream_response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_filter(session, upstream_response, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - async fn response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - 
ProxyHttpDefaultImpl.response_filter(session, upstream_response, &mut ()).await - } - - #[tracing::instrument(name = "proxy.upstream_response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn upstream_response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result> - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.fail_to_proxy", parent = &ctx.parent_span, skip(session))] - async fn fail_to_proxy( - &self, - session: &mut Session, - e: &Error, - ctx: &mut Self::CTX, - ) -> FailToProxy - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.fail_to_proxy(session, e, &mut ()).await - } - - #[tracing::instrument(name = "proxy.error_while_proxy", parent = &ctx.parent_span, skip(session))] - fn error_while_proxy( - &self, - peer: &HttpPeer, - session: &mut Session, - e: Box, - ctx: &mut Self::CTX, - client_reused: bool, - ) -> Box { - ProxyHttpDefaultImpl.error_while_proxy(peer, session, e, &mut (), client_reused) - } -} - -// PROXY HTTP DEFAULT IMPLEMENTATION -// ================================================================================================ - -/// Default implementation of the [`ProxyHttp`] trait. -/// -/// It is used to provide the default methods of the trait in order for the [`LoadBalancer`] to -/// implement the trait adding tracing instrumentation but without having to copy all default -/// implementations. 
-struct ProxyHttpDefaultImpl; - -#[async_trait] -impl ProxyHttp for ProxyHttpDefaultImpl { - type CTX = (); - fn new_ctx(&self) {} - - /// This method is the only one that does not have a default implementation in the trait. - async fn upstream_peer( - &self, - _session: &mut Session, - _ctx: &mut Self::CTX, - ) -> Result> { - unimplemented!("This is a dummy implementation, should not be called") - } -} - -// HELPERS -// ================================================================================================ - -/// Builds a `ProxyStatusResponse` from a list of workers and a supported proof type. -fn build_proxy_status_response(workers: &[Worker], supported_proof_type: ProofType) -> ProxyStatus { - let worker_statuses: Vec = - workers.iter().map(ProxyWorkerStatus::from).collect(); - ProxyStatus { - version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: supported_proof_type.into(), - workers: worker_statuses, - } -} diff --git a/bin/remote-prover/src/proxy/update_workers.rs b/bin/remote-prover/src/proxy/update_workers.rs deleted file mode 100644 index 320ac5a676..0000000000 --- a/bin/remote-prover/src/proxy/update_workers.rs +++ /dev/null @@ -1,152 +0,0 @@ -use core::fmt; -use std::sync::Arc; - -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use pingora::apps::{HttpServerApp, HttpServerOptions, ReusedHttpStream}; -use pingora::http::ResponseHeader; -use pingora::protocols::http::ServerSession; -use pingora::server::ShutdownWatch; -use tonic::async_trait; -use tracing::{error, info}; - -use super::LoadBalancerState; -use crate::commands::update_workers::UpdateWorkers; -use crate::utils::create_response_with_error_message; - -/// The Load Balancer Updater Service. -/// -/// This service is responsible for updating the list of workers in the load balancer. -pub(crate) struct LoadBalancerUpdateService { - lb_state: Arc, - server_opts: HttpServerOptions, -} - -/// Manually implement Debug for `LoadBalancerUpdateService`. 
-/// [`HttpServerOptions`] does not implement Debug, so we cannot derive Debug for -/// [`LoadBalancerUpdateService`], which is needed for the tracing instrumentation. -impl fmt::Debug for LoadBalancerUpdateService { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LBUpdaterService") - .field("lb_state", &self.lb_state) - .finish_non_exhaustive() - } -} - -impl LoadBalancerUpdateService { - pub(crate) fn new(lb_state: Arc) -> Self { - let mut server_opts = HttpServerOptions::default(); - server_opts.h2c = true; - - Self { lb_state, server_opts } - } -} - -#[async_trait] -impl HttpServerApp for LoadBalancerUpdateService { - /// Handles the update workers request. - /// - /// # Behavior - /// - Reads the HTTP request from the session. - /// - If query parameters are present, attempts to parse them as an `UpdateWorkers` object. - /// - If the parsing fails, returns an error response. - /// - If successful, updates the list of workers by calling `update_workers`. - /// - If the update is successful, returns the count of available workers. - /// - /// # Errors - /// - If the HTTP request cannot be read. - /// - If the query parameters cannot be parsed. - /// - If the workers cannot be updated. - /// - If the response cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "lb_updater_service.process_new_http", skip(http))] - async fn process_new_http( - self: &Arc, - mut http: ServerSession, - _shutdown: &ShutdownWatch, - ) -> Option { - match http.read_request().await { - Ok(res) => { - if !res { - error!("Failed to read request header"); - create_response_with_error_message( - &mut http, - "Failed to read request header".to_string(), - ) - .await - .ok(); - return None; - } - }, - Err(e) => { - error!("HTTP server fails to read from downstream: {e}"); - create_response_with_error_message( - &mut http, - format!("HTTP server fails to read from downstream: {e}"), - ) - .await - .ok(); - return None; - }, - } - - info!("Successfully get a new request to update workers"); - - // Extract and parse query parameters, if there are not any, return early. - let Some(query_params) = http.req_header().as_ref().uri.query() else { - let error_message = "No query parameters provided".to_string(); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }; - - let update_workers: Result = serde_qs::from_str(query_params); - let update_workers = match update_workers { - Ok(workers) => workers, - Err(err) => { - let error_message = err.as_report_context("failed to parse query parameters"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }, - }; - - // Update workers and handle potential errors. 
- if let Err(err) = self.lb_state.update_workers(update_workers).await { - let error_message = err.as_report_context("failed to update workers"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - } - - create_workers_updated_response(&mut http, self.lb_state.num_workers().await) - .await - .ok(); - - info!("Successfully updated workers"); - - None - } - - /// Provide HTTP server options used to override default behavior. This function will be called - /// every time a new connection is processed. - fn server_options(&self) -> Option<&HttpServerOptions> { - Some(&self.server_opts) - } -} - -// HELPERS -// ================================================================================================ - -/// Create a 200 response for updated workers -/// -/// It will set the X-Worker-Count header to the number of workers. -async fn create_workers_updated_response( - session: &mut ServerSession, - workers: usize, -) -> pingora_core::Result { - let mut header = ResponseHeader::build(200, None)?; - header.insert_header("X-Worker-Count", workers.to_string())?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(true) -} diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs deleted file mode 100644 index aa418e8cb3..0000000000 --- a/bin/remote-prover/src/proxy/worker.rs +++ /dev/null @@ -1,420 +0,0 @@ -use std::sync::LazyLock; -use std::time::{Duration, Instant}; - -use anyhow::Context; -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; -use miden_remote_prover::generated::ProxyWorkerStatus; -use miden_remote_prover::generated::remote_prover::worker_status_api_client::WorkerStatusApiClient; -use pingora::lb::Backend; -use semver::{Version, VersionReq}; -use serde::Serialize; -use tonic::transport::Channel; 
-use tracing::{error, info}; - -use super::metrics::WORKER_UNHEALTHY; - -/// The maximum exponent for the backoff. -/// -/// The maximum backoff is 2^[`MAX_BACKOFF_EXPONENT`] seconds. -const MAX_BACKOFF_EXPONENT: usize = 9; - -/// The version of the proxy. -/// -/// This is the version of the proxy that is used to check the version of the worker. -const MRP_PROXY_VERSION: &str = env!("CARGO_PKG_VERSION"); - -/// The version requirement for the worker. -/// -/// This is the version requirement for the worker that is used to check the version of the worker. -static WORKER_VERSION_REQUIREMENT: LazyLock = LazyLock::new(|| { - let current = - Version::parse(MRP_PROXY_VERSION).expect("Proxy version should be valid at this point"); - VersionReq::parse(&format!("~{}.{}", current.major, current.minor)) - .expect("Version should be valid at this point") -}); - -// WORKER -// ================================================================================================ - -/// A worker used for processing of requests. -/// -/// The worker is used to process requests. -/// It has a backend, a status client, a health status, and a version. -/// The backend is used to send requests to the worker. -/// The status client is used to check the status of the worker. -/// The health status is used to determine if the worker is healthy or unhealthy. -/// The version is used to check if the worker is compatible with the proxy. -/// The `is_available` is used to determine if the worker is available to process requests. -/// The `connection_timeout` is used to set the timeout for the connection to the worker. -/// The `total_timeout` is used to set the timeout for the total request. -#[derive(Debug, Clone)] -pub struct Worker { - backend: Backend, - status_client: Option>, - is_available: bool, - health_status: WorkerHealthStatus, - version: String, - connection_timeout: Duration, - total_timeout: Duration, -} - -/// The health status of a worker. 
-/// -/// A worker can be either healthy or unhealthy. -/// If the worker is unhealthy, it will have a number of failed attempts. -/// The number of failed attempts is incremented each time the worker is unhealthy. -#[derive(Debug, Clone, PartialEq, Serialize)] -pub enum WorkerHealthStatus { - /// The worker is healthy. - Healthy, - /// The worker is unhealthy. - Unhealthy { - /// The number of failed attempts. - num_failed_attempts: usize, - /// The timestamp of the first failure. - #[serde(skip_serializing)] - first_fail_timestamp: Instant, - /// The reason for the failure. - reason: String, - }, - /// The worker status is unknown. - Unknown, -} - -impl Worker { - // CONSTRUCTOR - // -------------------------------------------------------------------------------------------- - - /// Creates a new worker and a gRPC status client for the given worker address. - /// - /// # Errors - /// - Returns [`RemoteProverError::BackendCreationFailed`] if the worker address is invalid. - pub async fn new( - worker_addr: String, - connection_timeout: Duration, - total_timeout: Duration, - ) -> Result { - let backend = - Backend::new(&worker_addr).map_err(RemoteProverError::BackendCreationFailed)?; - - let (status_client, health_status) = - match create_status_client(&worker_addr, connection_timeout, total_timeout).await { - Ok(client) => (Some(client), WorkerHealthStatus::Unknown), - Err(err) => { - error!("Failed to create status client for worker {}: {}", worker_addr, err); - ( - None, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: 1, - first_fail_timestamp: Instant::now(), - reason: err.as_report_context("failed to create status client"), - }, - ) - }, - }; - - Ok(Self { - backend, - is_available: health_status == WorkerHealthStatus::Unknown, - status_client, - health_status, - version: String::new(), - connection_timeout, - total_timeout, - }) - } - - // MUTATORS - // -------------------------------------------------------------------------------------------- - - 
/// Attempts to recreate the status client for this worker. - /// - /// This method will try to create a new gRPC status client using the worker's address - /// and timeout configurations. If successful, it will update the worker's `status_client` - /// field. - /// - /// # Returns - /// - `Ok(())` if the client was successfully created - /// - `Err(RemoteProverError)` if the client creation failed - async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let name = self.name(); - match create_status_client(&name, self.connection_timeout, self.total_timeout).await { - Ok(client) => { - self.status_client = Some(client); - Ok(()) - }, - Err(err) => { - error!("Failed to recreate status client for worker {}: {}", name, err); - Err(err) - }, - } - } - - /// Checks the current status of the worker and returns the result without updating worker - /// state. - /// - /// Returns `Ok(())` if the worker is healthy and compatible, or `Err(reason)` if there's an - /// issue. The caller should use `update_status` to apply the result to the worker's health - /// status. 
- #[allow(clippy::too_many_lines)] - #[tracing::instrument(target = COMPONENT, name = "worker.check_status")] - pub async fn check_status(&mut self, supported_proof_type: ProofType) -> Result<(), String> { - if !self.should_do_health_check() { - return Ok(()); - } - - // If we don't have a status client, try to recreate it - if self.status_client.is_none() { - match self.recreate_status_client().await { - Ok(()) => { - info!("Successfully recreated status client for worker {}", self.name()); - }, - Err(err) => { - return Err(err.as_report_context("failed to recreate status client")); - }, - } - } - - let worker_status = match self.status_client.as_mut().unwrap().status(()).await { - Ok(response) => response.into_inner(), - Err(e) => { - error!("Failed to check worker status ({}): {}", self.name(), e); - return Err(e.message().to_string()); - }, - }; - - if worker_status.version.is_empty() { - return Err("Worker version is empty".to_string()); - } - - if !is_valid_version(&WORKER_VERSION_REQUIREMENT, &worker_status.version).unwrap_or(false) { - return Err(format!("Worker version is invalid ({})", worker_status.version)); - } - - self.version = worker_status.version; - - let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) - .inspect_err(|err| { - error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); - })?; - - if supported_proof_type != worker_supported_proof_type { - return Err(format!("Unsupported proof type: {supported_proof_type}")); - } - - Ok(()) - } - - /// Updates the worker's health status based on the result from `check_status`. - /// - /// If the result is `Ok(())`, the worker is marked as healthy. - /// If the result is `Err(reason)`, the worker is marked as unhealthy with the failure reason. 
- #[tracing::instrument(target = COMPONENT, name = "worker.update_status")] - pub fn update_status(&mut self, check_result: Result<(), String>) { - match check_result { - Ok(()) => { - self.set_health_status(WorkerHealthStatus::Healthy); - }, - Err(reason) => { - let failed_attempts = self.num_failures(); - self.set_health_status(WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts + 1, - first_fail_timestamp: match &self.health_status { - WorkerHealthStatus::Unhealthy { first_fail_timestamp, .. } => { - *first_fail_timestamp - }, - _ => Instant::now(), - }, - reason, - }); - }, - } - } - - /// Sets the worker availability. - pub fn set_availability(&mut self, is_available: bool) { - self.is_available = is_available; - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns the number of failures the worker has had. - pub fn num_failures(&self) -> usize { - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => 0, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp: _, - reason: _, - } => *failed_attempts, - } - } - - /// Returns the health status of the worker. - pub fn health_status(&self) -> &WorkerHealthStatus { - &self.health_status - } - - /// Returns the version of the worker. - pub fn version(&self) -> &str { - &self.version - } - - /// Returns the worker availability. - /// - /// A worker is available if it is healthy and ready to process requests. - pub fn is_available(&self) -> bool { - self.is_available - } - - /// Returns the worker name. - pub fn name(&self) -> String { - self.backend.addr.to_string() - } - - /// Returns whether the worker is healthy. - /// - /// This function will return `true` if the worker is healthy or the health status is unknown. - /// Otherwise, it will return `false`. 
- pub fn is_healthy(&self) -> bool { - !matches!(self.health_status, WorkerHealthStatus::Unhealthy { .. }) - } - - // PRIVATE HELPERS - // -------------------------------------------------------------------------------------------- - - /// Returns whether the worker should do a health check. - /// - /// A worker should do a health check if it is healthy or if the time since the first failure - /// is greater than the time since the first failure power of 2. - /// - /// The maximum exponent is [`MAX_BACKOFF_EXPONENT`], which corresponds to a backoff of - /// 2^[`MAX_BACKOFF_EXPONENT`] seconds. - fn should_do_health_check(&self) -> bool { - match self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => true, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp, - reason: _, - } => { - let time_since_first_failure = first_fail_timestamp.elapsed(); - time_since_first_failure - > Duration::from_secs( - 2u64.pow(failed_attempts.min(MAX_BACKOFF_EXPONENT) as u32), - ) - }, - } - } - - /// Sets the health status of the worker. - /// - /// This function will update the health status of the worker and update the worker availability - /// based on the new health status. - fn set_health_status(&mut self, health_status: WorkerHealthStatus) { - let was_healthy = self.is_healthy(); - self.health_status = health_status; - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => { - if !was_healthy { - self.is_available = true; - } - }, - WorkerHealthStatus::Unhealthy { .. 
} => { - WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); - self.is_available = false; - }, - } - } -} - -// PARTIAL EQUALITY -// ================================================================================================ - -impl PartialEq for Worker { - fn eq(&self, other: &Self) -> bool { - self.backend == other.backend - } -} - -// CONVERSIONS -// ================================================================================================ - -/// Conversion from a Worker reference to a `WorkerStatus` proto message. -impl From<&Worker> for ProxyWorkerStatus { - fn from(worker: &Worker) -> Self { - use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; - Self { - name: worker.name(), - version: worker.version().to_string(), - status: match worker.health_status() { - WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, - WorkerHealthStatus::Unhealthy { .. } => ProtoWorkerHealthStatus::Unhealthy, - WorkerHealthStatus::Unknown => ProtoWorkerHealthStatus::Unknown, - } as i32, - } - } -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Create a gRPC [`StatusApiClient`] for the given worker address. -/// -/// # Errors -/// - [`RemoteProverError::InvalidURI`] if the worker address is invalid. -/// - [`RemoteProverError::ConnectionFailed`] if the connection to the worker fails. -async fn create_status_client( - address: &str, - connection_timeout: Duration, - total_timeout: Duration, -) -> Result, RemoteProverError> { - let channel = Channel::from_shared(format!("http://{address}")) - .map_err(|err| RemoteProverError::InvalidURI(err, address.to_string()))? 
- .connect_timeout(connection_timeout) - .timeout(total_timeout) - .connect() - .await - .map_err(|err| RemoteProverError::ConnectionFailed(err, address.to_string()))?; - - Ok(WorkerStatusApiClient::new(channel)) -} - -/// Returns true if the version has major and minor versions match that of the required version. -/// Returns false otherwise. -/// -/// # Errors -/// Returns an error if either of the versions is malformed. -fn is_valid_version(version_req: &VersionReq, version: &str) -> anyhow::Result { - let received = Version::parse(version).context("Invalid worker version: {err}")?; - - Ok(version_req.matches(&received)) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_valid_version() { - let version_req = VersionReq::parse("~1.0").unwrap(); - assert!(is_valid_version(&version_req, "1.0.0").unwrap()); - assert!(is_valid_version(&version_req, "1.0.1").unwrap()); - assert!(is_valid_version(&version_req, "1.0.12").unwrap()); - assert!(is_valid_version(&version_req, "1.0").is_err()); - assert!(!is_valid_version(&version_req, "2.0.0").unwrap()); - assert!(!is_valid_version(&version_req, "1.1.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.1").unwrap()); - assert!(!is_valid_version(&version_req, "0.10.0").unwrap()); - assert!(is_valid_version(&version_req, "miden").is_err()); - assert!(is_valid_version(&version_req, "1.miden.12").is_err()); - } -} diff --git a/bin/remote-prover/src/server/mod.rs b/bin/remote-prover/src/server/mod.rs new file mode 100644 index 0000000000..2ca74f5398 --- /dev/null +++ b/bin/remote-prover/src/server/mod.rs @@ -0,0 +1,103 @@ +use std::num::NonZeroUsize; + +use anyhow::Context; +use miden_node_utils::cors::cors_for_grpc_web_layer; +use miden_node_utils::panic::catch_panic_layer_fn; +use 
miden_node_utils::tracing::grpc::grpc_trace_fn; +use proof_kind::ProofKind; +use tokio::net::TcpListener; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::TcpListenerStream; +use tonic_web::GrpcWebLayer; +use tower_http::catch_panic::CatchPanicLayer; +use tower_http::trace::TraceLayer; + +use crate::generated::api_server::ApiServer; +use crate::server::service::ProverService; + +mod proof_kind; +mod prover; +mod service; +mod status; + +#[cfg(test)] +mod tests; + +/// A gRPC server providing a proving service for the Miden blockchain. +#[derive(clap::Parser)] +pub struct Server { + /// The port the gRPC server will be hosted on. + #[arg(long, default_value = "50051", env = "MIDEN_PROVER_PORT")] + port: u16, + /// The proof type that the prover will be handling. + #[arg(long, value_enum, env = "MIDEN_PROVER_KIND")] + kind: ProofKind, + /// Maximum time allowed for a proof request to complete. Once exceeded, the request is + /// aborted. + #[arg(long, default_value = "60s", env = "MIDEN_PROVER_TIMEOUT", value_parser = humantime::parse_duration)] + timeout: std::time::Duration, + /// Maximum number of concurrent proof requests that the prover will allow. + /// + /// Note that the prover only proves one request at a time; the rest are queued. This capacity + /// is used to limit the number of requests that can be queued at any given time, and includes + /// the one request that is currently being processed. + #[arg(long, default_value_t = NonZeroUsize::new(1).unwrap(), env = "MIDEN_PROVER_CAPACITY")] + capacity: NonZeroUsize, +} + +impl Server { + /// Spawns the prover server, returning its handle and the port it is listening on. + pub async fn spawn(&self) -> anyhow::Result<(JoinHandle>, u16)> { + let listener = TcpListener::bind(format!("0.0.0.0:{}", self.port)) + .await + .context("failed to bind to gRPC port")?; + + // We do this to get the actual port if configured with `self.port=0`. 
+ let port = listener + .local_addr() + .expect("local address should exist for a tcp listener") + .port(); + + tracing::info!( + server.timeout=%humantime::Duration::from(self.timeout), + server.capacity=self.capacity, + proof.kind = %self.kind, + server.port = port, + "proof server listening" + ); + + let status_service = status::StatusService::new(self.kind); + let prover_service = ProverService::with_capacity(self.kind, self.capacity); + let prover_service = ApiServer::new(prover_service); + + let reflection_service = tonic_reflection::server::Builder::configure() + .register_file_descriptor_set(miden_node_proto_build::remote_prover_api_descriptor()) + .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) + .build_v1() + .context("failed to build reflection service")?; + + // Create a gRPC health reporter. + let (health_reporter, health_service) = tonic_health::server::health_reporter(); + + // Mark the service as serving + health_reporter.set_serving::>().await; + + let server = tonic::transport::Server::builder() + .accept_http1(true) + .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) + .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) + .layer(cors_for_grpc_web_layer()) + .layer(GrpcWebLayer::new()) + .timeout(self.timeout) + .add_service(prover_service) + .add_service(status_service) + .add_service(health_service) + .add_service(reflection_service) + .serve_with_incoming(TcpListenerStream::new(listener)); + + let server = + tokio::spawn(async move { server.await.context("failed while serving proof server") }); + + Ok((server, port)) + } +} diff --git a/bin/remote-prover/src/server/proof_kind.rs b/bin/remote-prover/src/server/proof_kind.rs new file mode 100644 index 0000000000..ccd72ca305 --- /dev/null +++ b/bin/remote-prover/src/server/proof_kind.rs @@ -0,0 +1,35 @@ +use crate::generated as proto; + +/// Specifies the type of proof supported by the remote prover. 
+#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)] +pub enum ProofKind { + Transaction, + Batch, + Block, +} + +impl From for ProofKind { + fn from(value: proto::ProofType) -> Self { + match value { + proto::ProofType::Transaction => ProofKind::Transaction, + proto::ProofType::Batch => ProofKind::Batch, + proto::ProofType::Block => ProofKind::Block, + } + } +} + +impl std::fmt::Display for ProofKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ProofKind::Transaction => write!(f, "transaction"), + ProofKind::Batch => write!(f, "batch"), + ProofKind::Block => write!(f, "block"), + } + } +} + +impl miden_node_utils::tracing::ToValue for ProofKind { + fn to_value(&self) -> opentelemetry::Value { + self.to_string().into() + } +} diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs new file mode 100644 index 0000000000..6ca76794e5 --- /dev/null +++ b/bin/remote-prover/src/server/prover.rs @@ -0,0 +1,122 @@ +use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; +use miden_node_utils::ErrorReport; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::block::BlockProof; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; +use miden_tx::LocalTransactionProver; +use miden_tx_batch_prover::LocalBatchProver; +use tracing::instrument; + +use crate::COMPONENT; +use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; + +/// An enum representing the different types of provers available. +pub enum Prover { + Transaction(LocalTransactionProver), + Batch(LocalBatchProver), + Block(LocalBlockProver), +} + +impl Prover { + /// Constructs a [`Prover`] of the specified [`ProofKind`]. 
+ pub fn new(proof_type: ProofKind) -> Self { + match proof_type { + ProofKind::Transaction => Self::Transaction(LocalTransactionProver::default()), + ProofKind::Batch => Self::Batch(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL)), + ProofKind::Block => Self::Block(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL)), + } + } + + /// Proves a [`proto::ProofRequest`] using the appropriate prover implementation as specified + /// during construction. + pub fn prove(&self, request: proto::ProofRequest) -> Result { + match self { + Prover::Transaction(prover) => prover.prove_request(request), + Prover::Batch(prover) => prover.prove_request(request), + Prover::Block(prover) => prover.prove_request(request), + } + } +} + +/// This trait abstracts over proof request handling by providing a common interface for our +/// different provers. +/// +/// It standardizes the proving process by providing default implementations for the decoding of +/// requests, and encoding of response. Notably it also standardizes the instrumentation, though +/// implementations should still add attributes that can only be known post-decoding of the request. +/// +/// Implementations of this trait only need to provide the input and outputs types, as well as the +/// proof implementation. +trait ProveRequest { + type Input: miden_protocol::utils::Deserializable; + type Output: miden_protocol::utils::Serializable; + + fn prove(&self, input: Self::Input) -> Result; + + /// Entry-point to the proof request handling. + /// + /// Decodes the request, proves it, and encodes the response. + fn prove_request(&self, request: proto::ProofRequest) -> Result { + Self::decode_request(request) + .and_then(|input| { + // We cannot #[instrument] the trait's prove method because it lacks an + // implementation, so we do it manually. 
+ tracing::info_span!("prove", target = COMPONENT).in_scope(|| { + self.prove(input).inspect_err(|e| tracing::Span::current().set_error(e)) + }) + }) + .map(|output| Self::encode_response(output)) + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn decode_request(request: proto::ProofRequest) -> Result { + use miden_protocol::utils::Deserializable; + + Self::Input::read_from_bytes(&request.payload).map_err(|e| { + tonic::Status::invalid_argument(e.as_report_context("failed to decode request")) + }) + } + + #[instrument(target=COMPONENT, skip_all)] + fn encode_response(output: Self::Output) -> proto::Proof { + use miden_protocol::utils::Serializable; + + proto::Proof { payload: output.to_bytes() } + } +} + +impl ProveRequest for LocalTransactionProver { + type Input = TransactionInputs; + type Output = ProvenTransaction; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input).map_err(|e| { + tonic::Status::internal(e.as_report_context("failed to prove transaction")) + }) + } +} + +impl ProveRequest for LocalBatchProver { + type Input = ProposedBatch; + type Output = ProvenBatch; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) + } +} + +impl ProveRequest for LocalBlockProver { + type Input = BlockProofRequest; + type Output = BlockProof; + + fn prove(&self, input: Self::Input) -> Result { + let BlockProofRequest { tx_batches, block_header, block_inputs } = input; + self.prove(tx_batches, &block_header, block_inputs) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) + } +} diff --git a/bin/remote-prover/src/server/service.rs b/bin/remote-prover/src/server/service.rs new file mode 100644 index 0000000000..4a72147a65 --- /dev/null +++ b/bin/remote-prover/src/server/service.rs @@ -0,0 +1,88 @@ +use std::num::NonZeroUsize; + +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use tokio::sync::{Mutex, 
MutexGuard, SemaphorePermit}; +use tracing::instrument; + +use crate::server::proof_kind::ProofKind; +use crate::server::prover::Prover; +use crate::{COMPONENT, generated as proto}; + +pub struct ProverService { + permits: tokio::sync::Semaphore, + prover: tokio::sync::Mutex, + kind: ProofKind, +} + +impl ProverService { + pub fn with_capacity(kind: ProofKind, capacity: NonZeroUsize) -> Self { + let permits = tokio::sync::Semaphore::new(capacity.get()); + let prover = Mutex::new(Prover::new(kind)); + Self { permits, prover, kind } + } + + fn is_supported(&self, kind: ProofKind) -> bool { + self.kind == kind + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn acquire_permit(&self) -> Result, tonic::Status> { + self.permits + .try_acquire() + .map_err(|_| tonic::Status::resource_exhausted("proof queue is full")) + } + + #[instrument(target=COMPONENT, skip_all)] + async fn acquire_prover(&self) -> MutexGuard<'_, Prover> { + self.prover.lock().await + } +} + +#[async_trait::async_trait] +impl proto::api_server::Api for ProverService { + async fn prove( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + // Record X-Request-ID header for trace correlation + let request_id = request + .metadata() + .get("x-request-id") + .and_then(|v| v.to_str().ok()) + .unwrap_or("unknown"); + tracing::Span::current().set_attribute("request.id", request_id); + + // Check that the proof type is supported. + let request = request.into_inner(); + // Protobuf enums return a default value if the enum is set to an unknown value. + // This round trip checks that the value is valid. + if request.proof_type() as i32 != request.proof_type { + return Err(tonic::Status::invalid_argument("unknown proof_type value")); + } + let proof_kind = ProofKind::from(request.proof_type()); + tracing::Span::current().set_attribute("request.kind", proof_kind); + + // Reject unsupported proof types early so they don't clog the queue. 
+ if !self.is_supported(proof_kind) { + return Err(tonic::Status::invalid_argument("unsupported proof type")); + } + + // This semaphore acts like a queue, but with a fixed capacity. + // + // We need to hold this until our request is processed to ensure that the queue capacity is + // not exceeded. + let _permit = self.acquire_permit()?; + + // This mutex is fair and uses FIFO ordering. + let prover = self.acquire_prover().await; + + // Blocking in place is fairly safe since we guarantee that only a single request is + // processed at a time. + // + // This has the downside that requests being proven cannot be cancelled since we are now + // outside the async runtime. This could occur if the server timeout is exceeded, or + // the client cancels the request. A different approach is technically possible, but + // would require more complex logic to handle cancellation in tandem with sync. + tokio::task::block_in_place(|| prover.prove(request)).map(tonic::Response::new) + } +} diff --git a/bin/remote-prover/src/api/status.rs b/bin/remote-prover/src/server/status.rs similarity index 51% rename from bin/remote-prover/src/api/status.rs rename to bin/remote-prover/src/server/status.rs index bb537b804b..6922f76167 100644 --- a/bin/remote-prover/src/api/status.rs +++ b/bin/remote-prover/src/server/status.rs @@ -1,25 +1,26 @@ +use proto::worker_status_api_server::WorkerStatusApiServer; use tonic::{Request, Response, Status}; -use crate::api::prover::ProofType; use crate::generated::worker_status_api_server::WorkerStatusApi; use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; -pub struct StatusRpcApi { - proof_type: ProofType, +pub struct StatusService { + kind: ProofKind, } -impl StatusRpcApi { - pub fn new(proof_type: ProofType) -> Self { - Self { proof_type } +impl StatusService { + pub fn new(kind: ProofKind) -> WorkerStatusApiServer { + WorkerStatusApiServer::new(Self { kind }) } } #[async_trait::async_trait] -impl WorkerStatusApi for 
StatusRpcApi { +impl WorkerStatusApi for StatusService { async fn status(&self, _: Request<()>) -> Result, Status> { Ok(Response::new(proto::WorkerStatus { version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: self.proof_type as i32, + supported_proof_type: self.kind as i32, })) } } diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs new file mode 100644 index 0000000000..f1d526b16d --- /dev/null +++ b/bin/remote-prover/src/server/tests.rs @@ -0,0 +1,384 @@ +use std::collections::BTreeMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; + +use assert_matches::assert_matches; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::account::auth::AuthScheme; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::note::NoteType; +use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; +use miden_protocol::transaction::{ExecutedTransaction, ProvenTransaction}; +use miden_testing::{Auth, MockChainBuilder}; +use miden_tx::utils::{Deserializable, Serializable}; +use miden_tx::{LocalTransactionProver, TransactionVerifier}; +use miden_tx_batch_prover::LocalBatchProver; +use serial_test::serial; + +use crate::generated::api_client::ApiClient; +use crate::generated::{Proof, ProofRequest, ProofType}; +use crate::server::Server; +use crate::server::proof_kind::ProofKind; + +/// A gRPC client with which to interact with the server. 
+#[derive(Clone)] +struct Client { + inner: ApiClient, +} + +impl Client { + async fn connect(port: u16) -> Self { + let inner = ApiClient::connect(format!("http://127.0.0.1:{port}")) + .await + .expect("client should connect"); + + Self { inner } + } + + async fn submit_request(&mut self, request: ProofRequest) -> Result { + self.inner.prove(request).await.map(tonic::Response::into_inner) + } +} + +impl ProofRequest { + /// Generates a proof request for a transaction using [`MockChain`]. + fn from_tx(tx: &ExecutedTransaction) -> Self { + let tx_inputs = tx.tx_inputs().clone(); + + Self { + proof_type: ProofType::Transaction as i32, + payload: tx_inputs.to_bytes(), + } + } + + fn from_batch(batch: &ProposedBatch) -> Self { + Self { + proof_type: ProofType::Batch as i32, + payload: batch.to_bytes(), + } + } + + async fn mock_tx() -> ExecutedTransaction { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) + .unwrap(); + + let fungible_asset_1: Asset = + FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx_context = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + Box::pin(tx_context.execute()).await.unwrap() + } + + async fn mock_batch() -> ProposedBatch { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder + .add_existing_wallet(Auth::BasicAuth { auth_scheme: AuthScheme::Falcon512Rpo }) + .unwrap(); + + let fungible_asset_1: Asset = + 
FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + let tx = Box::pin(tx.execute()).await.unwrap(); + let tx = tokio::task::block_in_place(|| { + LocalTransactionProver::default().prove(tx.tx_inputs().clone()).unwrap() + }); + + ProposedBatch::new( + vec![Arc::new(tx)], + mock_chain.latest_block_header(), + mock_chain.latest_partial_blockchain(), + BTreeMap::new(), + ) + .unwrap() + } +} + +// Test helpers for the server. +// +// Note: This is implemented under `#[cfg(test)]`. +impl Server { + /// A server configured with an arbitrary port (i.e. `port=0`) and the given kind. + /// + /// Capacity is set to 10 with a timeout of 60 seconds. + fn with_arbitrary_port(kind: ProofKind) -> Self { + Self { + port: 0, + kind, + timeout: Duration::from_secs(60), + capacity: NonZeroUsize::new(10).unwrap(), + } + } + + /// Overrides the capacity of the server. + /// + /// # Panics + /// + /// Panics if the given capacity is zero. + fn with_capacity(mut self, capacity: usize) -> Self { + self.capacity = NonZeroUsize::new(capacity).unwrap(); + self + } + + /// Overrides the timeout of the server. + fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } +} + +/// This test ensures that the legacy behaviour can still be configured. +/// +/// The original prover worker refused to process multiple requests concurrently. +/// This test ensures that the redesign behaves the same when limited to a capacity of 1. +/// +/// Create a server with a capacity of one and submit two requests. 
Ensure +/// that one succeeds and one fails with a resource exhaustion error. +#[serial] +#[tokio::test(flavor = "multi_thread")] +async fn legacy_behaviour_with_capacity_1() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(1) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request); + + let (first, second) = tokio::join!(a, b); + + // We cannot know which got served and which got rejected. + // We can only assert that one of them is Ok and the other is Err. + assert!(first.is_ok() || second.is_ok()); + assert!(first.is_err() || second.is_err()); + // We also expect that the error is a resource exhaustion error. + let err = first.err().or(second.err()).unwrap(); + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + + server.abort(); +} + +/// Test that multiple requests can be queued and capacity is respected. +/// +/// Create a server with a capacity of two and submit three requests. Ensure +/// that two succeed and one fails with a resource exhaustion error. 
+#[ignore = "Proving 3 requests concurrently causes temporary CI resource starvation which results in _sporadic_ timeouts"] +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn capacity_is_respected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(2) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + let mut client_c = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request.clone()); + let c = client_c.submit_request(request); + + let (first, second, third) = tokio::join!(a, b, c); + + // We cannot know which got served and which got rejected. + // We can only assert that two succeeded and one failed. + let mut expected = [true, true, false]; + let mut result = [first.is_ok(), second.is_ok(), third.is_ok()]; + expected.sort_unstable(); + result.sort_unstable(); + assert_eq!(expected, result); + + assert_matches!(first.err().or(second.err()).or(third.err()), Some(err) => { + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + }); + + server.abort(); +} + +/// Ensures that the server request timeout is adhered to. +/// +/// We cannot actually enforce this for a request that has already been proven as the proof +/// is done in a blocking sync task. We can however check that a second queued request is rejected. +/// +/// This is tricky to test properly because we can't easily control the server's response time. +/// Instead we configure the server to have a ridiculously short timeout which should hopefully +/// always timeout. 
+#[tokio::test(flavor = "multi_thread")] +async fn timeout_is_respected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_timeout(Duration::from_nanos(10)) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client_a = Client::connect(port).await; + let mut client_b = Client::connect(port).await; + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request); + + let (a, b) = tokio::join!(a, b); + + // At least one of the requests should timeout. + let err = a.err().or(b.err()).unwrap(); + + assert_eq!(err.code(), tonic::Code::Cancelled); + assert!(err.message().contains("Timeout expired")); + + server.abort(); +} + +/// Ensures that an invalid proof kind is rejected. +/// +/// The error should be an invalid argument error, but since that is fairly broad we also inspect +/// the error message for mention of the invalid proof kind. This is technically an implementation +/// detail, but it's the best we have without adding multiple abstraction layers. +#[tokio::test(flavor = "multi_thread")] +async fn invalid_proof_kind_is_rejected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .spawn() + .await + .expect("server should spawn"); + + let mut request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + request.proof_type = i32::MAX; + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await; + let err = response.unwrap_err(); + + assert_eq!(err.code(), tonic::Code::InvalidArgument); + assert!(err.message().contains("unknown proof_type value")); + + server.abort(); +} + +/// Ensures that a valid but unsupported proof kind is rejected. +/// +/// Aka submit a transaction proof request to a batch proving server. 
+/// +/// The error should be an invalid argument error, but since that is fairly broad we also inspect +/// the error message for mention of the unsupported proof kind. This is technically an +/// implementation detail, but it's the best we have without adding multiple abstraction layers. +#[tokio::test(flavor = "multi_thread")] +async fn unsupported_proof_kind_is_rejected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Batch) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await; + let err = response.unwrap_err(); + + assert_eq!(err.code(), tonic::Code::InvalidArgument); + assert!(err.message().contains("unsupported proof type")); + + server.abort(); +} + +/// Checks that a transaction request results in a correct proof. +/// +/// The proof is verified and the transaction IDs of request and response must correspond. +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn transaction_proof_is_correct() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .spawn() + .await + .expect("server should spawn"); + + let tx = ProofRequest::mock_tx().await; + let request = ProofRequest::from_tx(&tx); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await.unwrap(); + let response = ProvenTransaction::read_from_bytes(&response.payload).unwrap(); + + assert_eq!(response.id(), tx.id()); + TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&response).unwrap(); + + server.abort(); +} + +/// Checks that a batch request results in a correct proof. +/// +/// The proof is replicated locally, which ensures that the gRPC codec and server code do the +/// correct thing. 
+#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn batch_proof_is_correct() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Batch) + .spawn() + .await + .expect("server should spawn"); + + let batch = ProofRequest::mock_batch().await; + let request = ProofRequest::from_batch(&batch); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await.unwrap(); + let response = ProvenBatch::read_from_bytes(&response.payload).unwrap(); + + let expected = tokio::task::block_in_place(|| { + LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch).unwrap() + }); + assert_eq!(response, expected); + + server.abort(); +} diff --git a/bin/remote-prover/src/utils.rs b/bin/remote-prover/src/utils.rs deleted file mode 100644 index 1214911364..0000000000 --- a/bin/remote-prover/src/utils.rs +++ /dev/null @@ -1,178 +0,0 @@ -use std::net::TcpListener; - -use http::{HeaderMap, HeaderName, HeaderValue}; -use miden_remote_prover::error::RemoteProverError; -use pingora::http::ResponseHeader; -use pingora::protocols::http::ServerSession; -use pingora::{Error, ErrorType}; -use pingora_proxy::Session; -use prost::Message; -use tonic::Code; -use tracing::debug; - -use crate::COMPONENT; -use crate::commands::PROXY_HOST; -use crate::proxy::metrics::QUEUE_DROP_COUNT; - -// CONSTANTS -// ================================================================================================ -const GRPC_CONTENT_TYPE: HeaderValue = HeaderValue::from_static("application/grpc"); -const GRPC_STATUS_HEADER: HeaderName = HeaderName::from_static("grpc-status"); -const GRPC_MESSAGE_HEADER: HeaderName = HeaderName::from_static("grpc-message"); - -/// Build gRPC trailers with status and optional message -fn build_grpc_trailers( - grpc_status: Code, - error_message: Option<&str>, -) -> pingora_core::Result { - let mut trailers = HeaderMap::new(); - - // Set gRPC status - let status_code = (grpc_status as i32).to_string(); - trailers.insert( - 
GRPC_STATUS_HEADER, - status_code.parse().map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to parse grpc-status: {e}"), e) - })?, - ); - - // Set gRPC message if provided - if let Some(message) = error_message { - trailers.insert( - GRPC_MESSAGE_HEADER, - message.parse().map_err(|e| { - Error::because( - ErrorType::InternalError, - format!("Failed to parse grpc-message: {e}"), - e, - ) - })?, - ); - } - - Ok(trailers) -} - -/// Write a protobuf message as a gRPC response to a Pingora session -/// -/// This helper function takes a protobuf message and writes it to a Pingora session -/// in the proper gRPC format, handling message encoding, headers, and trailers. -pub async fn write_grpc_response_to_session( - session: &mut Session, - message: T, -) -> pingora_core::Result<()> -where - T: Message, -{ - // Serialize the protobuf message - let mut response_body = Vec::new(); - message.encode(&mut response_body).map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to encode proto response: {e}"), e) - })?; - - let mut grpc_message = Vec::new(); - - // Add compression flag (1 byte, 0 = no compression) - grpc_message.push(0u8); - - // Add message length (4 bytes, big-endian) - let msg_len = response_body.len() as u32; - grpc_message.extend_from_slice(&msg_len.to_be_bytes()); - - // Add the actual message - grpc_message.extend_from_slice(&response_body); - - // Create gRPC response headers WITHOUT grpc-status (that goes in trailers) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - session.write_response_body(Some(grpc_message.into()), false).await?; - - // Send trailers with gRPC status - let trailers = build_grpc_trailers(Code::Ok, None)?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Write a gRPC error response to a Pingora 
session -/// -/// This helper function creates a proper gRPC error response with the specified -/// status code and error message. -pub async fn write_grpc_error_to_session( - session: &mut Session, - grpc_status: Code, - error_message: &str, -) -> pingora_core::Result<()> { - // Create gRPC response headers (always HTTP 200 for gRPC) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - - // gRPC errors don't have a body, just headers and trailers - session.write_response_body(None, false).await?; - - // Send trailers with gRPC status and error message - let trailers = build_grpc_trailers(grpc_status, Some(error_message))?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for a full queue -pub(crate) async fn create_queue_full_response(session: &mut Session) -> pingora_core::Result<()> { - // Increment the queue drop count metric - QUEUE_DROP_COUNT.inc(); - - // Use our helper function to create a proper gRPC error response - write_grpc_error_to_session(session, Code::ResourceExhausted, "Too many requests in the queue") - .await -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for rate limiting -pub async fn create_too_many_requests_response( - session: &mut Session, - max_request_per_second: isize, -) -> pingora_core::Result<()> { - // Use our helper function to create a proper gRPC error response - let error_message = - format!("Rate limit exceeded: {max_request_per_second} requests per second"); - write_grpc_error_to_session(session, Code::ResourceExhausted, &error_message).await -} - -/// Create a 400 response with an error message -/// -/// It will set the X-Error-Message header to the error message. 
-pub async fn create_response_with_error_message( - session: &mut ServerSession, - error_msg: String, -) -> pingora_core::Result<()> { - let mut header = ResponseHeader::build(400, None)?; - header.insert_header("X-Error-Message", error_msg)?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(()) -} - -/// Checks if a port is available for use. -/// -/// # Arguments -/// * `port` - The port to check. -/// * `service` - A descriptive name for the service (for logging purposes). -/// -/// # Returns -/// * `Ok(TcpListener)` if the port is available. -/// * `Err(RemoteProverError::PortAlreadyInUse)` if the port is already in use. -pub fn check_port_availability( - port: u16, - service: &str, -) -> Result { - let addr = format!("{PROXY_HOST}:{port}"); - TcpListener::bind(&addr) - .inspect(|_| debug!(target: COMPONENT, %service, %port, %addr, "Port is available")) - .map_err(|err| RemoteProverError::PortAlreadyInUse(err, port)) -} diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index b9df84d41d..9c3fe9387d 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -21,7 +21,6 @@ clap = { features = ["derive"], version = "4.5" } fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } -miden-block-prover = { features = ["testing"], workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } @@ -33,3 +32,6 @@ rayon = { version = "1.10" } tokio = { workspace = true } tonic = { default-features = true, workspace = true } url = { workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index 4d8c283c6e..d60a611907 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -20,14 +20,14 @@ This command allows to run stress 
tests against the Store component. These tests The endpoints that you can test are: - `load_state` -- `sync_state` - `sync_notes` - `sync_nullifiers` - `sync_transactions` +- `sync-chain-mmr` Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. -**Note on Concurrency**: For the endpoints that support it (`sync_state`, `sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. +**Note on Concurrency**: For the endpoints that support it (`sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. Example usage: @@ -119,18 +119,6 @@ Database contains 99961 accounts and 99960 nullifiers **Performance Note**: The load-state benchmark shows that account tree loading (~21.3s) and nullifier tree loading (~21.5s) are the primary bottlenecks, while MMR loading and database connection are negligible (<3ms each). 
-- sync-state -``` bash -$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-state - -Average request latency: 1.120061ms -P50 request latency: 1.106042ms -P95 request latency: 1.530708ms -P99 request latency: 1.919209ms -P99.9 request latency: 5.795125ms -Average notes per response: 1.3159 -``` - - sync-notes ``` bash $ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-notes @@ -171,5 +159,21 @@ Pagination statistics: Average pages per run: 2.00 ``` +- sync-chain-mmr +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-chain-mmr --block-range 1000 + +Average request latency: 1.021ms +P50 request latency: 0.981ms +P95 request latency: 1.412ms +P99 request latency: 1.822ms +P99.9 request latency: 3.174ms +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 1 + Pagination rate: 0.01% + Average pages per run: 1.00 +``` + ## License This project is [MIT licensed](../../LICENSE). 
diff --git a/bin/stress-test/build.rs b/bin/stress-test/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/bin/stress-test/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 095b04caf1..a5cc82f9f4 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -4,9 +4,9 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; use store::{ + bench_sync_chain_mmr, bench_sync_notes, bench_sync_nullifiers, - bench_sync_state, bench_sync_transactions, load_state, }; @@ -70,8 +70,6 @@ pub enum Endpoint { #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, - #[command(name = "sync-state")] - SyncState, #[command(name = "sync-notes")] SyncNotes, #[command(name = "sync-transactions")] @@ -83,6 +81,12 @@ pub enum Endpoint { #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] block_range: u32, }, + #[command(name = "sync-chain-mmr")] + SyncChainMmr { + /// Block range size for each request (number of blocks to query). 
+ #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "1000")] + block_range: u32, + }, #[command(name = "load-state")] LoadState, } @@ -111,9 +115,6 @@ async fn main() { Endpoint::SyncNullifiers { prefixes } => { bench_sync_nullifiers(data_directory, iterations, concurrency, prefixes).await; }, - Endpoint::SyncState => { - bench_sync_state(data_directory, iterations, concurrency).await; - }, Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, @@ -127,6 +128,9 @@ async fn main() { ) .await; }, + Endpoint::SyncChainMmr { block_range } => { + bench_sync_chain_mmr(data_directory, iterations, concurrency, block_range).await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/seeding/metrics.rs b/bin/stress-test/src/seeding/metrics.rs index cdf32965ab..56e89e4a95 100644 --- a/bin/stress-test/src/seeding/metrics.rs +++ b/bin/stress-test/src/seeding/metrics.rs @@ -76,7 +76,7 @@ impl SeedingMetrics { } /// Prints the block metrics table. 
- #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn print_block_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "\nBlock metrics:")?; writeln!(f, "Note: Each block contains 256 transactions (16 batches * 16 transactions).")?; @@ -189,7 +189,7 @@ impl SeedingMetrics { } impl Display for SeedingMetrics { - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e0fe79338f..777ac993e7 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -5,19 +5,18 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; -use miden_block_prover::LocalBlockProver; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; +use miden_protocol::account::auth::AuthScheme; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountBuilder, AccountDelta, AccountId, - AccountStorage, AccountStorageMode, AccountType, }; @@ -30,6 +29,7 @@ use miden_protocol::block::{ FeeParameters, ProposedBlock, ProvenBlock, + SignedBlock, }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; @@ -47,10 +47,10 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::Serializable; use miden_protocol::{Felt, ONE, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::account::faucets::BasicFungibleFaucet; use 
miden_standards::account::wallets::BasicWallet; -use miden_standards::note::create_p2id_note; +use miden_standards::note::P2idNote; use rand::Rng; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::prelude::ParallelSlice; @@ -93,7 +93,9 @@ pub async fn seed_store( let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); let signer = EcdsaSecretKey::new(); let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer); - Store::bootstrap(genesis_state.clone(), &data_directory).expect("store should bootstrap"); + Store::bootstrap(genesis_state.clone(), &data_directory) + .await + .expect("store should bootstrap"); // start the store let (_, store_url) = start_store(data_directory.clone()).await; @@ -103,7 +105,7 @@ pub async fn seed_store( let accounts_filepath = data_directory.join(ACCOUNTS_FILENAME); let data_directory = miden_node_store::DataDirectory::load(data_directory).expect("data directory should exist"); - let genesis_header = genesis_state.into_block().unwrap().into_inner(); + let genesis_header = genesis_state.into_block().await.unwrap().into_inner(); let metrics = generate_blocks( num_accounts, public_accounts_percentage, @@ -145,7 +147,7 @@ async fn generate_blocks( let mut consume_notes_txs = vec![]; let consumes_per_block = TRANSACTIONS_PER_BATCH * BATCHES_PER_BLOCK - 1; - #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] let num_public_accounts = (consumes_per_block as f64 * (f64::from(public_accounts_percentage) / 100.0)) .round() as usize; @@ -161,7 +163,7 @@ async fn generate_blocks( SecretKey::with_rng(&mut *rng) }; - let mut prev_block = genesis_block.clone(); + let mut prev_block_header = genesis_block.header().clone(); let mut current_anchor_header = genesis_block.header().clone(); for i in 0..total_blocks { @@ -193,7 +195,7 @@ async fn generate_blocks( note_nullifiers.extend(notes.iter().map(|n| n.nullifier().prefix())); 
// create the tx that creates the notes - let emit_note_tx = create_emit_note_tx(prev_block.header(), &mut faucet, notes.clone()); + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); // collect all the txs block_txs.push(emit_note_tx); @@ -202,27 +204,23 @@ async fn generate_blocks( // create the batches with [TRANSACTIONS_PER_BATCH] txs each let batches: Vec = block_txs .par_chunks(TRANSACTIONS_PER_BATCH) - .map(|txs| create_batch(txs, prev_block.header())) + .map(|txs| create_batch(txs, &prev_block_header)) .collect(); // create the block and send it to the store let block_inputs = get_block_inputs(store_client, &batches, &mut metrics).await; // update blocks - prev_block = apply_block(batches, block_inputs, store_client, &mut metrics).await; - if current_anchor_header.block_epoch() != prev_block.header().block_epoch() { - current_anchor_header = prev_block.header().clone(); + prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics).await; + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); } // create the consume notes txs to be used in the next block let batch_inputs = - get_batch_inputs(store_client, prev_block.header(), ¬es, &mut metrics).await; - consume_notes_txs = create_consume_note_txs( - prev_block.header(), - accounts, - notes, - &batch_inputs.note_proofs, - ); + get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + consume_notes_txs = + create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); // track store size every 50 blocks if i % 50 == 0 { @@ -248,21 +246,21 @@ async fn apply_block( block_inputs: BlockInputs, store_client: &StoreClient, metrics: &mut SeedingMetrics, -) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); +) -> BlockHeader { + let proposed_block = ProposedBlock::new(block_inputs, 
batches).unwrap(); let (header, body) = proposed_block.clone().into_header_and_body().unwrap(); - let block_proof = LocalBlockProver::new(0) - .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) - .unwrap(); + let block_size: usize = header.to_bytes().len() + body.to_bytes().len(); let signature = EcdsaSecretKey::new().sign(header.commitment()); - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - let block_size: usize = proven_block.to_bytes().len(); + // SAFETY: The header, body, and signature are known to correspond to each other. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + let ordered_batches = proposed_block.batches().clone(); let start = Instant::now(); - store_client.apply_block(&proven_block).await.unwrap(); + store_client.apply_block(&ordered_batches, &signed_block).await.unwrap(); metrics.track_block_insertion(start.elapsed(), block_size); - proven_block + let (header, ..) = signed_block.into_parts(); + header } // HELPER FUNCTIONS @@ -310,7 +308,7 @@ fn create_accounts_and_notes( /// specified `faucet_id` and sent to the specified target account. 
fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RpoRandomCoin) -> Note { let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap()); - create_p2id_note( + P2idNote::create( faucet_id, target_id, vec![asset], @@ -328,7 +326,7 @@ fn create_account(public_key: PublicKey, index: u64, storage_mode: AccountStorag AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(storage_mode) - .with_auth_component(AuthFalcon512Rpo::new(public_key.into())) + .with_auth_component(AuthSingleSig::new(public_key.into(), AuthScheme::Falcon512Rpo)) .with_component(BasicWallet) .build() .unwrap() @@ -346,7 +344,10 @@ fn create_faucet() -> Account { .account_type(AccountType::FungibleFaucet) .storage_mode(AccountStorageMode::Private) .with_component(BasicFungibleFaucet::new(token_symbol, 2, Felt::new(u64::MAX)).unwrap()) - .with_auth_component(AuthFalcon512Rpo::new(key_pair.public_key().into())) + .with_auth_component(AuthSingleSig::new( + key_pair.public_key().into(), + AuthScheme::Falcon512Rpo, + )) .build() .unwrap() } @@ -366,7 +367,7 @@ fn create_batch(txs: &[ProvenTransaction], block_ref: &BlockHeader) -> ProvenBat account_updates, InputNotes::new(input_notes).unwrap(), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(txs.iter().map(TransactionHeader::from).collect()), ) .unwrap() @@ -420,7 +421,7 @@ fn create_consume_note_tx( ProvenTransactionBuilder::new( account.id(), init_hash, - account.commitment(), + account.to_commitment(), account_delta_commitment, block_ref.block_num(), block_ref.commitment(), @@ -441,13 +442,13 @@ fn create_emit_note_tx( faucet: &mut Account, output_notes: Vec, ) -> ProvenTransaction { - let initial_account_hash = faucet.commitment(); + let initial_account_hash = faucet.to_commitment(); - let metadata_slot_name = AccountStorage::faucet_sysdata_slot(); + let metadata_slot_name = 
BasicFungibleFaucet::metadata_slot(); let slot = faucet.storage().get_item(metadata_slot_name).unwrap(); faucet .storage_mut() - .set_item(metadata_slot_name, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) + .set_item(metadata_slot_name, [slot[0] + Felt::new(10), slot[1], slot[2], slot[3]].into()) .unwrap(); faucet.increment_nonce(ONE).unwrap(); @@ -455,7 +456,7 @@ fn create_emit_note_tx( ProvenTransactionBuilder::new( faucet.id(), initial_account_hash, - faucet.commitment(), + faucet.to_commitment(), Word::empty(), block_ref.block_num(), block_ref.commitment(), @@ -522,6 +523,8 @@ async fn get_block_inputs( /// Runs the store with the given data directory. Returns a tuple with: /// - a gRPC client to access the store /// - the URL of the store +/// +/// The store uses a local prover. pub async fn start_store( data_directory: PathBuf, ) -> (RpcClient>, Url) { @@ -543,6 +546,7 @@ pub async fn start_store( task::spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/bin/stress-test/src/store/metrics.rs b/bin/stress-test/src/store/metrics.rs index 95f8ce0ffd..b56f362643 100644 --- a/bin/stress-test/src/store/metrics.rs +++ b/bin/stress-test/src/store/metrics.rs @@ -18,7 +18,7 @@ pub fn print_summary(timers_accumulator: &[Duration]) { } /// Computes a percentile from a list of durations. 
-#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] +#[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] fn compute_percentile(times: &[Duration], percentile: f64) -> Duration { if times.is_empty() { return Duration::ZERO; diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index fa39303aed..314a5e95d0 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -24,9 +24,6 @@ mod metrics; // CONSTANTS // ================================================================================================ -/// Number of accounts used in each `sync_state` call. -const ACCOUNTS_PER_SYNC_STATE: usize = 5; - /// Number of accounts used in each `sync_notes` call. const ACCOUNTS_PER_SYNC_NOTES: usize = 15; @@ -36,77 +33,6 @@ const NOTE_IDS_PER_NULLIFIERS_CHECK: usize = 20; /// Number of attempts the benchmark will make to reach the store before proceeding. const STORE_STATUS_RETRIES: usize = 10; -// SYNC STATE -// ================================================================================================ - -/// Sends multiple `sync_state` requests to the store and prints the performance. -/// -/// Arguments: -/// - `data_directory`: directory that contains the database dump file and the accounts ids dump -/// file. -/// - `iterations`: number of requests to send. -/// - `concurrency`: number of requests to send in parallel. 
-pub async fn bench_sync_state(data_directory: PathBuf, iterations: usize, concurrency: usize) { - // load accounts from the dump file - let accounts_file = data_directory.join(ACCOUNTS_FILENAME); - let accounts = fs::read_to_string(&accounts_file) - .await - .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); - let mut account_ids = accounts.lines().map(|a| AccountId::from_hex(a).unwrap()).cycle(); - - let (store_client, _) = start_store(data_directory).await; - - wait_for_store(&store_client).await.unwrap(); - - // each request will have 5 account ids, 5 note tags and will be sent with block number 0 - let request = |_| { - let mut client = store_client.clone(); - let account_batch: Vec = - account_ids.by_ref().take(ACCOUNTS_PER_SYNC_STATE).collect(); - tokio::spawn(async move { sync_state(&mut client, account_batch, 0).await }) - }; - - // create a stream of tasks to send sync_notes requests - let (timers_accumulator, responses) = stream::iter(0..iterations) - .map(request) - .buffer_unordered(concurrency) - .map(|res| res.unwrap()) - .collect::<(Vec<_>, Vec<_>)>() - .await; - - print_summary(&timers_accumulator); - - #[allow(clippy::cast_precision_loss)] - let average_notes_per_response = - responses.iter().map(|r| r.notes.len()).sum::() as f64 / responses.len() as f64; - println!("Average notes per response: {average_notes_per_response}"); -} - -/// Sends a single `sync_state` request to the store and returns a tuple with: -/// - the elapsed time. -/// - the response. 
-pub async fn sync_state( - api_client: &mut RpcClient>, - account_ids: Vec, - block_num: u32, -) -> (Duration, proto::rpc::SyncStateResponse) { - let note_tags = account_ids - .iter() - .map(|id| u32::from(NoteTag::with_account_target(*id))) - .collect::>(); - - let account_ids = account_ids - .iter() - .map(|id| proto::account::AccountId { id: id.to_bytes() }) - .collect::>(); - - let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; - - let start = Instant::now(); - let response = api_client.sync_state(sync_request).await.unwrap(); - (start.elapsed(), response.into_inner()) -} - // SYNC NOTES // ================================================================================================ @@ -197,61 +123,68 @@ pub async fn bench_sync_nullifiers( .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); let account_ids: Vec = accounts .lines() - .take(ACCOUNTS_PER_SYNC_STATE) + .take(ACCOUNTS_PER_SYNC_NOTES) .map(|a| AccountId::from_hex(a).unwrap()) .collect(); - // get all nullifier prefixes from the store + // Get all nullifier prefixes from the store using sync_notes let mut nullifier_prefixes: Vec = vec![]; let mut current_block_num = 0; loop { - // get the accounts notes - let (_, response) = - sync_state(&mut store_client, account_ids.clone(), current_block_num).await; + // Get the accounts notes using sync_notes + let note_tags: Vec = account_ids + .iter() + .map(|id| u32::from(NoteTag::with_account_target(*id))) + .collect(); + let sync_request = proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { + block_from: current_block_num, + block_to: None, + }), + note_tags, + }; + let response = store_client.sync_notes(sync_request).await.unwrap().into_inner(); + let note_ids = response .notes .iter() .map(|n| n.note_id.unwrap()) .collect::>(); - // get the notes nullifiers, limiting to 20 notes maximum + // Get the notes nullifiers, limiting to 20 notes maximum let note_ids_to_fetch 
= note_ids.iter().take(NOTE_IDS_PER_NULLIFIERS_CHECK).copied().collect::>(); - let notes = store_client - .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) - .await - .unwrap() - .into_inner() - .notes; - - nullifier_prefixes.extend( - notes - .iter() - .filter_map(|n| { - // private notes are filtered out because `n.details` is None - let details_bytes = n.note.as_ref()?.details.as_ref()?; - let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); - Some(u32::from(details.nullifier().prefix())) - }) - .collect::>(), - ); + if !note_ids_to_fetch.is_empty() { + let notes = store_client + .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) + .await + .unwrap() + .into_inner() + .notes; + + nullifier_prefixes.extend( + notes + .iter() + .filter_map(|n| { + // Private notes are filtered out because `n.details` is None + let details_bytes = n.note.as_ref()?.details.as_ref()?; + let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); + Some(u32::from(details.nullifier().prefix())) + }) + .collect::>(), + ); + } - // Use the response from the first chunk to update block number - // (all chunks should return the same block header for the same block_num) - let (_, first_response) = sync_state( - &mut store_client, - account_ids[..1000.min(account_ids.len())].to_vec(), - current_block_num, - ) - .await; - current_block_num = first_response.block_header.unwrap().block_num; - if first_response.chain_tip == current_block_num { + // Update block number from pagination info + let pagination_info = response.pagination_info.expect("pagination_info should exist"); + current_block_num = pagination_info.block_num; + if pagination_info.chain_tip == current_block_num { break; } } let mut nullifiers = nullifier_prefixes.into_iter().cycle(); - // each request will have `prefixes_per_request` prefixes and block number 0 + // Each request will have `prefixes_per_request` prefixes and block number 0 let request = |_| { let mut client 
= store_client.clone(); @@ -260,7 +193,7 @@ pub async fn bench_sync_nullifiers( tokio::spawn(async move { sync_nullifiers(&mut client, nullifiers_batch).await }) }; - // create a stream of tasks to send the requests + // Create a stream of tasks to send the requests let (timers_accumulator, responses) = stream::iter(0..iterations) .map(request) .buffer_unordered(concurrency) @@ -270,7 +203,7 @@ pub async fn bench_sync_nullifiers( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_nullifiers_per_response = responses.iter().map(|r| r.nullifiers.len()).sum::() as f64 / responses.len() as f64; println!("Average nullifiers per response: {average_nullifiers_per_response}"); @@ -364,7 +297,7 @@ pub async fn bench_sync_transactions( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_transactions_per_response = if responses.is_empty() { 0.0 } else { @@ -376,13 +309,13 @@ pub async fn bench_sync_transactions( // Calculate pagination statistics let total_runs = results.len(); let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let pagination_rate = if total_runs > 0 { (paginated_runs as f64 / total_runs as f64) * 100.0 } else { 0.0 }; - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let avg_pages = if total_runs > 0 { results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 } else { @@ -481,6 +414,76 @@ async fn sync_transactions_paginated( } } +// SYNC CHAIN MMR +// ================================================================================================ + +/// Sends multiple `sync_chain_mmr` requests to the store and prints the performance. +/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file. 
+/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `block_range_size`: number of blocks to include per request. +pub async fn bench_sync_chain_mmr( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + block_range_size: u32, +) { + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + let chain_tip = store_client.clone().status(()).await.unwrap().into_inner().chain_tip; + let block_range_size = block_range_size.max(1); + + let request = |_| { + let mut client = store_client.clone(); + tokio::spawn(async move { sync_chain_mmr(&mut client, chain_tip, block_range_size).await }) + }; + + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + + print_summary(&timers_accumulator); + + let total_runs = results.len(); + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); +} + +/// Sends a single `sync_chain_mmr` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response. 
+async fn sync_chain_mmr( + api_client: &mut RpcClient>, + block_from: u32, + block_to: u32, +) -> SyncChainMmrRun { + let sync_request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), + }; + + let start = Instant::now(); + let response = api_client.sync_chain_mmr(sync_request).await.unwrap(); + let elapsed = start.elapsed(); + let response = response.into_inner(); + let _mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + SyncChainMmrRun { duration: elapsed } +} + +#[derive(Clone)] +struct SyncChainMmrRun { + duration: Duration, +} + // LOAD STATE // ================================================================================================ diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index e5e5511ad1..6ca345217a 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -22,16 +22,14 @@ tracing-forest = ["miden-node-utils/tracing-forest"] anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } -miden-block-prover = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } miden-standards = { workspace = true } -miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } -rand = { version = "0.9" } +rand = { workspace = true } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } @@ -46,13 +44,14 @@ assert_matches = { workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } 
miden-node-utils = { features = ["testing"], workspace = true } +miden-node-validator = { workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } pretty_assertions = "1.4" -rand_chacha = { default-features = false, version = "0.9" } +rand_chacha = { default-features = false, workspace = true } rstest = { workspace = true } serial_test = "3.2" -tempfile = { version = "3.20" } +tempfile = { workspace = true } tokio = { features = ["test-util"], workspace = true } winterfell = { version = "0.13" } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index e3cc714c2a..34dab83a3f 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -9,7 +9,7 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; -use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; +use miden_remote_prover_client::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; use tokio::task::JoinSet; diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index a3a36ec4f0..56b5a3666f 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,29 +1,15 @@ -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::sync::Arc; use anyhow::Context; use futures::FutureExt; -use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{OrderedBatches, ProvenBatch}; -use miden_protocol::block::{ - 
BlockBody, - BlockHeader, - BlockInputs, - BlockNumber, - BlockProof, - ProposedBlock, - ProvenBlock, -}; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock, SignedBlock}; use miden_protocol::note::NoteHeader; -use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; -use rand::Rng; +use miden_protocol::transaction::TransactionHeader; use tokio::time::Duration; -use tracing::{Span, info, instrument}; -use url::Url; +use tracing::{Span, instrument}; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; @@ -35,21 +21,19 @@ use crate::{COMPONENT, TelemetryInjectorExt}; // ================================================================================================= pub struct BlockBuilder { + /// The frequency at which blocks are produced. pub block_interval: Duration, - /// Used to simulate block proving by sleeping for a random duration selected from this range. - pub simulated_proof_time: Range, /// Simulated block failure rate as a percentage. /// /// Note: this _must_ be sign positive and less than 1.0. pub failure_rate: f64, + /// The store RPC client for committing blocks. pub store: StoreClient, + /// The validator RPC client for validating blocks. pub validator: BlockProducerValidatorClient, - - /// The prover used to prove a proposed block into a proven block. - pub block_prover: BlockProver, } impl BlockBuilder { @@ -59,20 +43,12 @@ impl BlockBuilder { pub fn new( store: StoreClient, validator: BlockProducerValidatorClient, - block_prover_url: Option, block_interval: Duration, ) -> Self { - let block_prover = match block_prover_url { - Some(url) => BlockProver::new_remote(url), - None => BlockProver::new_local(MIN_PROOF_SECURITY_LEVEL), - }; - Self { block_interval, // Note: The range cannot be empty. 
- simulated_proof_time: Duration::ZERO..Duration::from_millis(1), failure_rate: 0.0, - block_prover, store, validator, } @@ -136,16 +112,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(|(proposed_block, _)| { + .inspect_ok(|proposed_block| { ProposedBlock::inject_telemetry(proposed_block); }) - .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) - .inspect_ok(ProvenBlock::inject_telemetry) - // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot - // handle errors after it considers the process complete (which makes sense). - .and_then(|proven_block| async { self.inject_failure(proven_block) }) - .and_then(|proven_block| self.commit_block(mempool, proven_block)) + .and_then(|proposed_block| self.build_and_validate_block(proposed_block)) + .and_then(|(ordered_batches, signed_block)| self.commit_block(mempool, ordered_batches, signed_block)) // Handle errors by propagating the error to the root span and rolling back the block. 
.inspect_err(|err| Span::current().set_error(err)) .or_else(|err| async { @@ -239,23 +210,21 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { + ) -> Result { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = ProposedBlock::new(inputs.clone(), batches) - .map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = + ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; - Ok((proposed_block, inputs)) + Ok(proposed_block) } #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] - async fn validate_block( + async fn build_and_validate_block( &self, proposed_block: ProposedBlock, - block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> - { + ) -> Result<(OrderedBatches, SignedBlock), BuildBlockError> { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ let proposed_block = proposed_block.clone(); @@ -278,53 +247,27 @@ impl BlockBuilder { } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, signature, body)) - } - - #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] - async fn prove_block( - &self, - ordered_batches: OrderedBatches, - block_inputs: BlockInputs, - header: BlockHeader, - signature: Signature, - body: BlockBody, - ) -> Result { - // Prove block using header and body from validator. - let block_proof = self - .block_prover - .prove(ordered_batches.clone(), header.clone(), block_inputs) - .await?; - self.simulate_proving().await; - - // SAFETY: The header and body are assumed valid and consistent with the proof. 
- let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { - return Err(BuildBlockError::SecurityLevelTooLow( - proven_block.proof_security_level(), - MIN_PROOF_SECURITY_LEVEL, - )); - } - // TODO(sergerad): Consider removing this validation. Once block proving is implemented, - // this would be replaced with verifying the proof returned from the prover against - // the block header. - validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; - - Ok(proven_block) + // SAFETY: The header, body, and signature are known to correspond to each other because the + // header and body are derived from the proposed block and the signature is verified + // against the corresponding commitment. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + Ok((ordered_batches, signed_block)) } #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] async fn commit_block( &self, mempool: &SharedMempool, - built_block: ProvenBlock, + ordered_batches: OrderedBatches, + signed_block: SignedBlock, ) -> Result<(), BuildBlockError> { self.store - .apply_block(&built_block) + .apply_block(&ordered_batches, &signed_block) .await .map_err(BuildBlockError::StoreApplyBlockFailed)?; - mempool.lock().await.commit_block(built_block.header().clone()); + let (header, ..) 
= signed_block.into_parts(); + mempool.lock().await.commit_block(header); Ok(()) } @@ -333,31 +276,6 @@ impl BlockBuilder { async fn rollback_block(&self, mempool: &SharedMempool, block: BlockNumber) { mempool.lock().await.rollback_block(block); } - - #[instrument(target = COMPONENT, name = "block_builder.simulate_proving", skip_all)] - async fn simulate_proving(&self) { - let proving_duration = rand::rng().random_range(self.simulated_proof_time.clone()); - - Span::current().set_attribute("range.min_s", self.simulated_proof_time.start); - Span::current().set_attribute("range.max_s", self.simulated_proof_time.end); - Span::current().set_attribute("dice_roll_s", proving_duration); - - tokio::time::sleep(proving_duration).await; - } - - #[instrument(target = COMPONENT, name = "block_builder.inject_failure", skip_all, err)] - fn inject_failure(&self, value: T) -> Result { - let roll = rand::rng().random::(); - - Span::current().set_attribute("failure_rate", self.failure_rate); - Span::current().set_attribute("dice_roll", roll); - - if roll < self.failure_rate { - Err(BuildBlockError::InjectedFailure) - } else { - Ok(value) - } - } } /// A wrapper around batches selected for inlucion in a block, primarily used to be able to inject @@ -454,76 +372,3 @@ impl TelemetryInjectorExt for ProvenBlock { span.set_attribute("block.commitments.transaction", header.tx_commitment()); } } - -// BLOCK PROVER -// ================================================================================================ - -pub enum BlockProver { - Local(LocalBlockProver), - Remote(RemoteBlockProver), -} - -impl BlockProver { - pub fn new_local(security_level: u32) -> Self { - info!(target: COMPONENT, "Using local block prover"); - Self::Local(LocalBlockProver::new(security_level)) - } - - pub fn new_remote(endpoint: impl Into) -> Self { - info!(target: COMPONENT, "Using remote block prover"); - Self::Remote(RemoteBlockProver::new(endpoint)) - } - - #[instrument(target = COMPONENT, skip_all, err)] - 
async fn prove( - &self, - tx_batches: OrderedBatches, - block_header: BlockHeader, - block_inputs: BlockInputs, - ) -> Result { - match self { - Self::Local(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .map_err(BuildBlockError::ProveBlockFailed), - Self::Remote(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .await - .map_err(BuildBlockError::RemoteProverClientError), - } - } -} - -/// Validates that the proven block's transaction headers are consistent with the transactions -/// passed in the proposed block. -/// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as defined by [`OrderedTransactionHeaders`]. -fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, -) -> Result<(), BuildBlockError> { - if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { - return Err(BuildBlockError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.body().transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. 
- for (proposed_header, proven_header) in proposed_txs - .as_slice() - .iter() - .zip(proven_block.body().transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(BuildBlockError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) -} diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 5b2ab30b32..f581ca95e8 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -1,5 +1,3 @@ -#![allow(dead_code, reason = "WIP: mempoool refactor")] - use std::collections::HashSet; use std::sync::Arc; @@ -127,10 +125,6 @@ impl AuthenticatedTransaction { Arc::clone(&self.inner) } - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } - pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } @@ -177,4 +171,8 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 40c74c99f5..b610b0534a 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,6 +1,5 @@ use core::error::Error as CoreError; -use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -223,16 +222,10 @@ pub enum BuildBlockError { ValidateBlockFailed(#[source] Box), #[error("block signature is invalid")] InvalidSignature, - #[error("failed to prove block")] - ProveBlockFailed(#[source] BlockProverError), + /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. 
- #[error("nothing actually went wrong, failure was injected on purpose")] - InjectedFailure, - #[error("failed to prove block with remote prover")] - RemoteProverClientError(#[source] RemoteProverClientError), - #[error("block proof security level is too low: {0} < {1}")] - SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. #[error("{error_msg}")] Other { diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 36ab9b53d1..955aa23565 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -60,7 +60,7 @@ pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); /// /// The value is selected such that all transactions should approximately be processed within one /// minutes with a block time of 5s. -#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +#[expect(clippy::cast_sign_loss, reason = "Both durations are positive")] pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( DEFAULT_MAX_BATCHES_PER_BLOCK * DEFAULT_MAX_TXS_PER_BATCH diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 461a836c25..c41e305fab 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -416,7 +416,7 @@ mod tests { BTreeMap::from([(account_update.account_id(), account_update)]), InputNotes::default(), Vec::default(), - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(vec![tx_header]), ) .unwrap(); diff --git a/crates/block-producer/src/mempool/subscription.rs b/crates/block-producer/src/mempool/subscription.rs index 6bfbf7eaa0..8d0eb90943 100644 --- a/crates/block-producer/src/mempool/subscription.rs +++ b/crates/block-producer/src/mempool/subscription.rs @@ -2,9 +2,9 @@ use std::collections::{BTreeMap, HashSet}; use std::ops::Mul; use 
miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::NetworkNote; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::transaction::{OutputNote, TransactionId}; +use miden_standards::note::NetworkNoteExt; use tokio::sync::mpsc; use crate::domain::transaction::AuthenticatedTransaction; @@ -83,7 +83,13 @@ impl SubscriptionProvider { let network_notes = tx .output_notes() .filter_map(|note| match note { - OutputNote::Full(inner) => NetworkNote::try_from(inner.clone()).ok(), + // We check first to avoid cloning non-network notes. + OutputNote::Full(inner) => inner.is_network_note().then_some( + inner + .clone() + .into_account_target_network_note() + .expect("we just checked that this is a network note"), + ), _ => None, }) .collect(); diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 8245c1ee6b..d7ea49db07 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -40,6 +40,9 @@ use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +#[cfg(test)] +mod tests; + /// The block producer server. /// /// Specifies how to connect to the store, batch prover, and block prover components. @@ -55,8 +58,6 @@ pub struct BlockProducer { pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, - /// The address of the block prover component. - pub block_prover_url: Option, /// The interval at which to produce batches. pub batch_interval: Duration, /// The interval at which to produce blocks. @@ -82,7 +83,6 @@ impl BlockProducer { /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is /// encountered. 
- #[allow(clippy::too_many_lines)] pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); @@ -123,8 +123,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); - let block_builder = - BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); + let block_builder = BlockBuilder::new(store.clone(), validator, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 453512597b..63aa983db2 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -1,27 +1,24 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_air::{ExecutionProof, HashFunction}; -use miden_node_proto::generated::{ - self as proto, block_producer::api_client as block_producer_client, -}; +use miden_node_proto::generated::block_producer::api_client as block_producer_client; use miden_node_store::{GenesisState, Store}; -use miden_protocol::{ - Digest, - account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, - transaction::ProvenTransactionBuilder, -}; -use miden_tx::utils::Serializable; -use tokio::{net::TcpListener, runtime, task, time::sleep}; +use miden_node_utils::fee::test_fee_params; +use miden_node_validator::{Validator, ValidatorSigner}; +use miden_protocol::testing::random_secret_key::random_secret_key; +use tokio::net::TcpListener; +use tokio::time::sleep; +use tokio::{runtime, task}; use tonic::transport::{Channel, Endpoint}; -use winterfell::Proof; +use url::Url; -use crate::{BlockProducer, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH}; +use crate::{BlockProducer, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +/// Tests that the block 
producer starts up correctly even when the store is not initially +/// available. The block producer should retry with exponential backoff until the store becomes +/// available, then start serving requests. #[tokio::test] async fn block_producer_startup_is_robust_to_network_failures() { - // This test starts the block producer and tests that it starts serving only after the store - // is started. - // get the addresses for the store and block producer let store_addr = { let store_listener = @@ -36,113 +33,108 @@ async fn block_producer_startup_is_robust_to_network_failures() { .expect("Failed to get block-producer address") }; - let ntx_builder_addr = { - let ntx_builder_address = TcpListener::bind("127.0.0.1:0") - .await - .expect("failed to bind the ntx builder address"); - ntx_builder_address.local_addr().expect("failed to get ntx builder address") + let validator_addr = { + let validator_listener = + TcpListener::bind("127.0.0.1:0").await.expect("failed to bind validator"); + validator_listener.local_addr().expect("failed to get validator address") }; - // start the block producer + let grpc_timeout = Duration::from_secs(30); + + // start the validator + task::spawn(async move { + let temp_dir = tempfile::tempdir().expect("tempdir should be created"); + let data_directory = temp_dir.path().to_path_buf(); + Validator { + address: validator_addr, + grpc_timeout, + signer: ValidatorSigner::new_local(random_secret_key()), + data_directory, + } + .serve() + .await + .unwrap(); + }); + + // start the block producer BEFORE the store is available + // this tests the exponential backoff behavior + let store_url = Url::parse(&format!("http://{store_addr}")).expect("Failed to parse store URL"); + let validator_url = + Url::parse(&format!("http://{validator_addr}")).expect("Failed to parse validator URL"); task::spawn(async move { BlockProducer { block_producer_address: block_producer_addr, - store_address: store_addr, - ntx_builder_address: Some(ntx_builder_addr), + 
store_url, + validator_url, batch_prover_url: None, - block_prover_url: None, batch_interval: Duration::from_millis(500), block_interval: Duration::from_millis(500), - max_txs_per_batch: SERVER_MAX_TXS_PER_BATCH, - max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK, + max_txs_per_batch: DEFAULT_MAX_TXS_PER_BATCH, + max_batches_per_block: DEFAULT_MAX_BATCHES_PER_BLOCK, + grpc_timeout, + mempool_tx_capacity: NonZeroUsize::new(100).unwrap(), } .serve() .await .unwrap(); }); - // test: connecting to the block producer should fail until the store is started + // test: connecting to the block producer should fail because the store is not yet started + // (and therefore the block producer is not yet listening) let block_producer_endpoint = Endpoint::try_from(format!("http://{block_producer_addr}")).expect("valid url"); let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await; - assert!(block_producer_client.is_err()); + assert!( + block_producer_client.is_err(), + "Block producer should not be available before store is started" + ); // start the store let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let store_runtime = { - let genesis_state = GenesisState::new(vec![], 1, 1); - Store::bootstrap(genesis_state.clone(), data_directory.path()) - .expect("store should bootstrap"); - let dir = data_directory.path().to_path_buf(); - let rpc_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = TcpListener::bind(store_addr) - .await - .expect("store should bind the block-producer port"); - // in order to later kill the store, we need to spawn a new runtime and run the store on - // it. That allows us to kill all the tasks spawned by the store when we - // kill the runtime. 
- let store_runtime = - runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); - store_runtime.spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + let store_runtime = start_store(store_addr, data_directory.path()).await; + + // wait for the block producer's exponential backoff to connect to the store + // use a retry loop since CI environments may be slower + let block_producer_client = { + let mut attempts = 0; + loop { + attempts += 1; + match block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await { + Ok(client) => break client, + Err(_) if attempts < 30 => { + sleep(Duration::from_millis(200)).await; + }, + Err(e) => panic!( + "block producer client should connect after store is started (after {attempts} attempts): {e}" + ), } - .serve() - .await - .expect("store should start serving"); - }); - store_runtime + } }; - // we need to wait for the exponential backoff of the block producer to connect to the store - sleep(Duration::from_secs(1)).await; + // test: status request against block-producer should succeed + let response = send_status_request(block_producer_client).await; + assert!(response.is_ok(), "Status request should succeed, got: {:?}", response.err()); - let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint) - .await - .expect("block producer client should connect"); + // verify the response contains expected data + let status = response.unwrap().into_inner(); + assert_eq!(status.status, "connected"); - // test: request against block-producer api should succeed - let response = send_request(block_producer_client.clone(), 0).await; - assert!(response.is_ok()); - - // kill the store + // Shutdown the store before data_directory is dropped to allow the database to flush properly shutdown_store(store_runtime).await; - - // test: request against 
block-producer api should fail immediately - let response = send_request(block_producer_client.clone(), 1).await; - assert!(response.is_err()); - - // test: restart the store and request should succeed - let store_runtime = restart_store(store_addr, data_directory.path()).await; - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); - - // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly - shutdown_store(store_runtime).await; -} - -/// Shuts down the store runtime properly to allow RocksDB to flush before the temp directory is -/// deleted. -async fn shutdown_store(store_runtime: runtime::Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); } -/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. -async fn restart_store( +/// Starts the store with a fresh genesis state and returns the runtime handle. 
+async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, random_secret_key()); + Store::bootstrap(genesis_state.clone(), data_directory) + .await + .expect("store should bootstrap"); + + let dir = data_directory.to_path_buf(); let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -151,7 +143,8 @@ async fn restart_store( let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - let dir = data_directory.to_path_buf(); + + // Use a separate runtime so we can kill all store tasks later let store_runtime = runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); store_runtime.spawn(async move { @@ -159,8 +152,9 @@ async fn restart_store( rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover_url: None, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_timeout: Duration::from_secs(30), } .serve() .await @@ -169,32 +163,17 @@ async fn restart_store( store_runtime } -/// Creates a dummy transaction and submits it to the block producer. -async fn send_request( +/// Shuts down the store runtime properly to allow the database to flush before the temp directory +/// is deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Sends a status request to the block producer to verify connectivity. 
+async fn send_status_request( mut client: block_producer_client::ApiClient, - i: u8, -) -> Result, tonic::Status> -{ - let tx = ProvenTransactionBuilder::new( - AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Private, - ), - Digest::default(), - [i; 32].try_into().unwrap(), - Digest::default(), - 0.into(), - Digest::default(), - u32::MAX.into(), - ExecutionProof::new(Proof::new_dummy(), HashFunction::default()), - ) - .build() - .unwrap(); - let request = proto::transaction::ProvenTransaction { - transaction: tx.to_bytes(), - transaction_replay: None, - }; - client.submit_proven_transaction(request).await +) -> Result, tonic::Status> { + client.status(()).await } diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index a82a60582d..fb20bc160e 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -10,7 +10,8 @@ use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, SignedBlock}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::Serializable; @@ -238,8 +239,15 @@ impl StoreClient { } #[instrument(target = COMPONENT, name = "store.client.apply_block", skip_all, err)] - pub async fn apply_block(&self, block: &ProvenBlock) -> Result<(), StoreError> { - let request = tonic::Request::new(proto::blockchain::Block { block: block.to_bytes() }); + pub async fn apply_block( + &self, + ordered_batches: &OrderedBatches, + signed_block: &SignedBlock, + ) -> Result<(), StoreError> { + let request = tonic::Request::new(proto::store::ApplyBlockRequest { 
+ ordered_batches: ordered_batches.to_bytes(), + block: Some(signed_block.into()), + }); self.client.clone().apply_block(request).await.map(|_| ()).map_err(Into::into) } diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index ecbd215863..ca705e241e 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -66,7 +66,7 @@ impl TransactionBatchConstructor for ProvenBatch { account_updates, InputNotes::new_unchecked(input_notes), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked( txs.into_iter().map(TransactionHeader::from).collect(), ), diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml new file mode 100644 index 0000000000..2a42af4305 --- /dev/null +++ b/crates/db/Cargo.toml @@ -0,0 +1,23 @@ +[package] +authors.workspace = true +description = "Shared database capabilities for Miden node" +edition.workspace = true +homepage.workspace = true +keywords = ["database", "miden", "node"] +license.workspace = true +name = "miden-node-db" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +deadpool = { default-features = false, workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { default-features = false, workspace = true } +diesel = { features = ["sqlite"], workspace = true } +miden-protocol = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } diff --git a/crates/db/src/conv.rs b/crates/db/src/conv.rs new file mode 100644 index 0000000000..64c853c73d --- /dev/null +++ b/crates/db/src/conv.rs @@ -0,0 +1,183 @@ +//! Central place to define conversion from and to database primitive types +//! +//! Eventually, all of them should have types and we can implement a trait for them +//! rather than function pairs. +//! +//! 
Notice: All of them are infallible. The invariant is a sane content of the database +//! and humans ensure the sanity of casts. +//! +//! Notice: Keep in mind if you _need_ to expand the datatype, only if you require sorting this is +//! mandatory! +//! +//! Notice: Ensure you understand what casting does at the bit-level before changing any. +//! +//! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered +//! by migrations! + +#![expect( + clippy::inline_always, + reason = "Just unification helpers of 1-2 lines of casting types" +)] +#![expect( + dead_code, + reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" +)] +#![expect( + clippy::cast_sign_loss, + reason = "This is the one file where we map the signed database types to the working types" +)] +#![expect( + clippy::cast_possible_wrap, + reason = "We will not approach the item count where i64 and usize casting will cause issues + on relevant platforms" +)] + +use miden_protocol::Felt; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteTag; + +#[derive(Debug, thiserror::Error)] +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box, + from_type: &'static str, + into_type: &'static str, +} + +/// Convert from and to it's database representation and back +/// +/// We do not assume sanity of DB types. 
+pub trait SqlTypeConvert: Sized { + type Raw: Sized; + + fn to_raw_sql(self) -> Self::Raw; + fn from_raw_sql(_raw: Self::Raw) -> Result; + + fn map_err( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::(), + into_type: std::any::type_name::(), + } + } +} + +impl SqlTypeConvert for BlockNumber { + type Raw = i64; + + fn from_raw_sql(raw: Self::Raw) -> Result { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + i64::from(self.as_u32()) + } +} + +impl SqlTypeConvert for NoteTag { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[expect(clippy::cast_sign_loss)] + Ok(NoteTag::new(raw as u32)) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + self.as_u32() as i32 + } +} + +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + +impl SqlTypeConvert for StorageSlotName { + type Raw = String; + + fn from_raw_sql(raw: Self::Raw) -> Result { + StorageSlotName::new(raw).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + String::from(self) + } +} + +// Raw type conversions - eventually introduce wrapper types +// =========================================================== + +#[inline(always)] +pub(crate) fn raw_sql_to_nullifier_prefix(raw: i32) -> u16 { + debug_assert!(raw >= 0); + raw as u16 +} +#[inline(always)] +pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> 
i32 { + i32::from(prefix) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { + debug_assert!(raw >= 0); + Felt::new(raw as u64) +} +#[inline(always)] +pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { + nonce.as_int() as i64 +} + +#[inline(always)] +pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { + raw +} +#[inline(always)] +pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { + delta +} + +#[inline(always)] +#[expect(clippy::cast_sign_loss)] +pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { + raw as u8 +} +#[inline(always)] +pub(crate) fn note_type_to_raw_sql(note_type: u8) -> i32 { + i32::from(note_type) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_idx(raw: i32) -> usize { + raw as usize +} +#[inline(always)] +pub(crate) fn idx_to_raw_sql(idx: usize) -> i32 { + idx as i32 +} diff --git a/crates/db/src/errors.rs b/crates/db/src/errors.rs new file mode 100644 index 0000000000..5e59ff4b9a --- /dev/null +++ b/crates/db/src/errors.rs @@ -0,0 +1,98 @@ +use std::any::type_name; +use std::io; + +use deadpool_sync::InteractError; +use thiserror::Error; + +// SCHEMA VERIFICATION ERROR +// ================================================================================================= + +/// Errors that can occur during schema verification. 
+#[derive(Debug, Error)] +pub enum SchemaVerificationError { + #[error("failed to create in-memory reference database")] + InMemoryDbCreation(#[source] diesel::ConnectionError), + #[error("failed to apply migrations to reference database")] + MigrationApplication(#[source] Box), + #[error("failed to extract schema from database")] + SchemaExtraction(#[source] diesel::result::Error), + #[error( + "schema mismatch: expected {expected_count} objects, found {actual_count} \ + ({missing_count} missing, {extra_count} unexpected)" + )] + Mismatch { + expected_count: usize, + actual_count: usize, + missing_count: usize, + extra_count: usize, + }, +} + +// DATABASE ERROR +// ================================================================================================= + +#[derive(Debug, Error)] +pub enum DatabaseError { + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("setup deadpool connection pool failed")] + ConnectionPoolObtainError(#[from] Box), + #[error("conversion from SQL to rust type {to} failed")] + ConversionSqlToRust { + #[source] + inner: Option>, + to: &'static str, + }, + #[error(transparent)] + Diesel(#[from] diesel::result::Error), + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), + #[error("I/O error")] + Io(#[from] io::Error), + #[error("pool build error")] + PoolBuild(#[from] deadpool::managed::BuildError), + #[error("Setup deadpool connection pool failed")] + Pool(#[from] deadpool::managed::PoolError), +} + +impl DatabaseError { + /// Converts from `InteractError` + /// + /// Note: Required since `InteractError` has at least one enum + /// variant that is _not_ `Send + Sync` and hence prevents the + /// `Sync` auto implementation. + /// This does an internal conversion to string while maintaining + /// convenience. 
+ /// + /// Using `MSG` as const so it can be called as + /// `.map_err(DatabaseError::interact::<"Your message">)` + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } + + /// Failed to convert an SQL entry to a rust representation + pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError + where + MaybeE: Into>, + E: std::error::Error + Send + Sync + 'static, + { + DatabaseError::ConversionSqlToRust { + inner: err.into().map(|err| Box::new(err) as Box), + to: type_name::(), + } + } + + /// Creates a deserialization error with a static context string and the original error. + /// + /// This is a convenience wrapper around [`ConversionSqlToRust`](Self::ConversionSqlToRust). + pub fn deserialization( + context: &'static str, + source: impl std::error::Error + Send + Sync + 'static, + ) -> Self { + Self::ConversionSqlToRust { + inner: Some(Box::new(source)), + to: context, + } + } +} diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs new file mode 100644 index 0000000000..7000f131d1 --- /dev/null +++ b/crates/db/src/lib.rs @@ -0,0 +1,77 @@ +mod conv; +mod errors; +mod manager; + +use std::path::Path; + +pub use conv::{DatabaseTypeConversionError, SqlTypeConvert}; +use diesel::{RunQueryDsl, SqliteConnection}; +pub use errors::{DatabaseError, SchemaVerificationError}; +pub use manager::{ConnectionManager, ConnectionManagerError, configure_connection_on_creation}; +use tracing::Instrument; + +pub type Result = std::result::Result; + +/// Database handle that provides fundamental operations that various components of Miden Node can +/// utililze for their storage needs. +#[derive(Clone)] +pub struct Db { + pool: deadpool_diesel::Pool>, +} + +impl Db { + /// Creates a new database instance with the provided connection pool. 
+ pub fn new(database_filepath: &Path) -> Result { + let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); + let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; + Ok(Self { pool }) + } + + /// Create and commit a transaction with the queries added in the provided closure + pub async fn transact(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result + + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .in_current_span() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) + .in_current_span() + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } + + /// Run the query _without_ a transaction + pub async fn query(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(move |conn| { + let r = query(conn)?; + Ok(r) + }) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } +} diff --git a/crates/store/src/db/manager.rs b/crates/db/src/manager.rs similarity index 85% rename from crates/store/src/db/manager.rs rename to crates/db/src/manager.rs index fca9a33db6..c34e7a15e9 100644 --- a/crates/store/src/db/manager.rs +++ b/crates/db/src/manager.rs @@ -36,12 +36,12 @@ impl ConnectionManagerError { /// Create a connection manager with per-connection setup /// /// Particularly, `foreign_key` checks are enabled and using a write-append-log for journaling. 
-pub(crate) struct ConnectionManager { +pub struct ConnectionManager { pub(crate) manager: deadpool_diesel::sqlite::Manager, } impl ConnectionManager { - pub(crate) fn new(database_path: &str) -> Self { + pub fn new(database_path: &str) -> Self { let manager = deadpool_diesel::sqlite::Manager::new( database_path.to_owned(), deadpool_diesel::sqlite::Runtime::Tokio1, @@ -75,9 +75,14 @@ impl deadpool::managed::Manager for ConnectionManager { } } -pub(crate) fn configure_connection_on_creation( +pub fn configure_connection_on_creation( conn: &mut SqliteConnection, ) -> Result<(), ConnectionManagerError> { + // Wait up to 3 seconds for writer locks before erroring. + diesel::sql_query("PRAGMA busy_timeout=3000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + // Enable the WAL mode. This allows concurrent reads while the transaction is being written, // this is required for proper synchronization of the servers in-memory and on-disk // representations (see [State::apply_block]) @@ -89,5 +94,10 @@ pub(crate) fn configure_connection_on_creation( diesel::sql_query("PRAGMA foreign_keys=ON") .execute(conn) .map_err(ConnectionManagerError::ConnectionParamSetup)?; + + // Set busy timeout so concurrent writers wait instead of immediately failing. 
+ diesel::sql_query("PRAGMA busy_timeout=5000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; Ok(()) } diff --git a/crates/large-smt-backend-rocksdb/Cargo.toml b/crates/large-smt-backend-rocksdb/Cargo.toml new file mode 100644 index 0000000000..c7f009f929 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors.workspace = true +description = "Large-scale Sparse Merkle Tree backed by pluggable storage - RocksDB backend" +edition.workspace = true +homepage.workspace = true +keywords = ["merkle", "miden", "node", "smt"] +license.workspace = true +name = "miden-large-smt-backend-rocksdb" +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +miden-crypto = { features = ["concurrent", "std"], workspace = true } +miden-protocol = { features = ["std"], workspace = true } +rayon = { version = "1.10" } +rocksdb = { default-features = false, features = ["bindgen-runtime", "lz4"], version = "0.24" } +winter-utils = { version = "0.13" } diff --git a/crates/large-smt-backend-rocksdb/README.md b/crates/large-smt-backend-rocksdb/README.md new file mode 100644 index 0000000000..4b612c3257 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/README.md @@ -0,0 +1,45 @@ +# miden-large-smt-backend-rocksdb + +Large-scale Sparse Merkle Tree backed by pluggable storage - RocksDB backend implementation. + +This crate provides `LargeSmt`, a hybrid SMT implementation that stores the top of the tree +(depths 0–23) in memory and persists the lower depths (24–64) in storage as fixed-size subtrees. +This hybrid layout scales beyond RAM while keeping common operations fast. + +## Migration Status + +This crate is the future home for `LargeSmt` and its storage backends. Currently it re-exports +types from `miden-protocol` (which re-exports from `miden-crypto`). + +The migration will be completed in phases: +1. 
✅ Create this crate as a re-export layer (current state) +2. Copy the full implementation from miden-crypto to this crate +3. Update miden-crypto to remove the rocksdb feature +4. Update dependents to use this crate directly + +## Features + +- **concurrent**: Enables parallel processing with rayon (enabled by default) +- **rocksdb**: (Future) Enables RocksDB storage backend + +## Usage + +```rust +use miden_large_smt::{LargeSmt, MemoryStorage}; + +// Create an empty tree with in-memory storage +let storage = MemoryStorage::new(); +let smt = LargeSmt::new(storage).unwrap(); +``` + +## Re-exported Types + +This crate re-exports the following types from `miden-protocol`: + +- `LargeSmt` - The large-scale SMT implementation +- `LargeSmtError` - Error type for LargeSmt operations +- `MemoryStorage` - In-memory storage backend +- `SmtStorage` - Storage backend trait +- `Subtree` - Serializable subtree representation +- `StorageUpdates` / `StorageUpdateParts` - Batch update types +- Various SMT types: `Smt`, `SmtLeaf`, `SmtProof`, `LeafIndex`, etc. 
diff --git a/crates/large-smt-backend-rocksdb/src/helpers.rs b/crates/large-smt-backend-rocksdb/src/helpers.rs new file mode 100644 index 0000000000..23f3c8d88f --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/helpers.rs @@ -0,0 +1,83 @@ +use miden_crypto::merkle::smt::{MAX_LEAF_ENTRIES, SmtLeaf, SmtLeafError}; +use miden_crypto::word::LexicographicWord; +use rocksdb::Error as RocksDbError; + +use crate::{StorageError, Word}; + +pub(crate) fn map_rocksdb_err(err: RocksDbError) -> StorageError { + StorageError::Backend(Box::new(err)) +} + +pub(crate) fn insert_into_leaf( + leaf: &mut SmtLeaf, + key: Word, + value: Word, +) -> Result, StorageError> { + match leaf { + SmtLeaf::Empty(_) => { + *leaf = SmtLeaf::new_single(key, value); + Ok(None) + }, + SmtLeaf::Single(kv_pair) => { + if kv_pair.0 == key { + let old_value = kv_pair.1; + kv_pair.1 = value; + Ok(Some(old_value)) + } else { + let mut pairs = vec![*kv_pair, (key, value)]; + pairs.sort_by(|(key_1, _), (key_2, _)| { + LexicographicWord::from(*key_1).cmp(&LexicographicWord::from(*key_2)) + }); + *leaf = SmtLeaf::Multiple(pairs); + Ok(None) + } + }, + SmtLeaf::Multiple(kv_pairs) => match kv_pairs.binary_search_by(|kv_pair| { + LexicographicWord::from(kv_pair.0).cmp(&LexicographicWord::from(key)) + }) { + Ok(pos) => { + let old_value = kv_pairs[pos].1; + kv_pairs[pos].1 = value; + Ok(Some(old_value)) + }, + Err(pos) => { + if kv_pairs.len() >= MAX_LEAF_ENTRIES { + return Err(StorageError::Leaf(SmtLeafError::TooManyLeafEntries { + actual: kv_pairs.len() + 1, + })); + } + kv_pairs.insert(pos, (key, value)); + Ok(None) + }, + }, + } +} + +pub(crate) fn remove_from_leaf(leaf: &mut SmtLeaf, key: Word) -> (Option, bool) { + match leaf { + SmtLeaf::Empty(_) => (None, false), + SmtLeaf::Single((key_at_leaf, value_at_leaf)) => { + if *key_at_leaf == key { + let old_value = *value_at_leaf; + *leaf = SmtLeaf::new_empty(key.into()); + (Some(old_value), true) + } else { + (None, false) + } + }, + 
SmtLeaf::Multiple(kv_pairs) => match kv_pairs.binary_search_by(|kv_pair| { + LexicographicWord::from(kv_pair.0).cmp(&LexicographicWord::from(key)) + }) { + Ok(pos) => { + let old_value = kv_pairs[pos].1; + kv_pairs.remove(pos); + debug_assert!(!kv_pairs.is_empty()); + if kv_pairs.len() == 1 { + *leaf = SmtLeaf::Single(kv_pairs[0]); + } + (Some(old_value), false) + }, + Err(_) => (None, false), + }, + } +} diff --git a/crates/large-smt-backend-rocksdb/src/lib.rs b/crates/large-smt-backend-rocksdb/src/lib.rs new file mode 100644 index 0000000000..563439c9f4 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/lib.rs @@ -0,0 +1,59 @@ +//! Large-scale Sparse Merkle Tree backed by pluggable storage. +//! +//! `LargeSmt` stores the top of the tree (depths 0–23) in memory and persists the lower +//! depths (24–64) in storage as fixed-size subtrees. This hybrid layout scales beyond RAM +//! while keeping common operations fast. +//! +//! # Usage +//! +//! ```ignore +//! use miden_large_smt::{LargeSmt, MemoryStorage}; +//! +//! // Create an empty tree with in-memory storage +//! let storage = MemoryStorage::new(); +//! let smt = LargeSmt::new(storage).unwrap(); +//! ``` +//! +//! ```ignore +//! use miden_large_smt_backend_rocksdb::{LargeSmt, RocksDbConfig, RocksDbStorage}; +//! +//! let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db")).unwrap(); +//! let smt = LargeSmt::new(storage).unwrap(); +//! ``` + +extern crate alloc; + +mod helpers; +#[expect(clippy::doc_markdown, clippy::inline_always)] +mod rocksdb; +// Re-export from miden-protocol. 
+pub use miden_protocol::crypto::merkle::smt::{ + InnerNode, + LargeSmt, + LargeSmtError, + LeafIndex, + MemoryStorage, + SMT_DEPTH, + Smt, + SmtLeaf, + SmtLeafError, + SmtProof, + SmtStorage, + StorageError, + StorageUpdateParts, + StorageUpdates, + Subtree, + SubtreeError, + SubtreeUpdate, +}; +// Also re-export commonly used types for convenience +pub use miden_protocol::{ + EMPTY_WORD, + Felt, + Word, + crypto::{ + hash::rpo::Rpo256, + merkle::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, NodeIndex, SparseMerklePath}, + }, +}; +pub use rocksdb::{RocksDbConfig, RocksDbStorage}; diff --git a/crates/large-smt-backend-rocksdb/src/rocksdb.rs b/crates/large-smt-backend-rocksdb/src/rocksdb.rs new file mode 100644 index 0000000000..92f187c4d9 --- /dev/null +++ b/crates/large-smt-backend-rocksdb/src/rocksdb.rs @@ -0,0 +1,1329 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use std::path::PathBuf; +use std::sync::Arc; + +use miden_crypto::Map; +use miden_crypto::merkle::NodeIndex; +use miden_crypto::merkle::smt::{InnerNode, SmtLeaf, Subtree}; +use rocksdb::{ + BlockBasedOptions, + Cache, + ColumnFamilyDescriptor, + DB, + DBCompactionStyle, + DBCompressionType, + DBIteratorWithThreadMode, + FlushOptions, + IteratorMode, + Options, + ReadOptions, + WriteBatch, +}; +use winter_utils::{Deserializable, Serializable}; + +use super::{SmtStorage, StorageError, StorageUpdateParts, StorageUpdates, SubtreeUpdate}; +use crate::helpers::{insert_into_leaf, map_rocksdb_err, remove_from_leaf}; +use crate::{EMPTY_WORD, Word}; + +const IN_MEMORY_DEPTH: u8 = 24; + +/// The name of the `RocksDB` column family used for storing SMT leaves. +const LEAVES_CF: &str = "leaves"; +/// The names of the `RocksDB` column families used for storing SMT subtrees (deep nodes). 
+const SUBTREE_24_CF: &str = "st24"; +const SUBTREE_32_CF: &str = "st32"; +const SUBTREE_40_CF: &str = "st40"; +const SUBTREE_48_CF: &str = "st48"; +const SUBTREE_56_CF: &str = "st56"; +const SUBTREE_DEPTHS: [u8; 5] = [56, 48, 40, 32, 24]; + +/// The name of the `RocksDB` column family used for storing metadata (e.g., root, counts). +const METADATA_CF: &str = "metadata"; +/// The name of the `RocksDB` column family used for storing level 24 hashes for fast tree +/// rebuilding. +const DEPTH_24_CF: &str = "depth24"; + +/// The key used in the `METADATA_CF` column family to store the SMT's root hash. +const ROOT_KEY: &[u8] = b"smt_root"; +/// The key used in the `METADATA_CF` column family to store the total count of non-empty leaves. +const LEAF_COUNT_KEY: &[u8] = b"leaf_count"; +/// The key used in the `METADATA_CF` column family to store the total count of key-value entries. +const ENTRY_COUNT_KEY: &[u8] = b"entry_count"; + +/// A `RocksDB`-backed persistent storage implementation for a Sparse Merkle Tree (SMT). +/// +/// Implements the `SmtStorage` trait, providing durable storage for SMT components +/// including leaves, subtrees (for deeper parts of the tree), and metadata like the SMT root +/// and counts. It leverages `RocksDB` column families to organize data: +/// - `LEAVES_CF` ("leaves"): Stores `SmtLeaf` data, keyed by their logical u64 index. +/// - `SUBTREE_24_CF` ("st24"): Stores serialized `Subtree` data at depth 24, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_32_CF` ("st32"): Stores serialized `Subtree` data at depth 32, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_40_CF` ("st40"): Stores serialized `Subtree` data at depth 40, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_48_CF` ("st48"): Stores serialized `Subtree` data at depth 48, keyed by their root +/// `NodeIndex`. +/// - `SUBTREE_56_CF` ("st56"): Stores serialized `Subtree` data at depth 56, keyed by their root +/// `NodeIndex`. 
/// - `METADATA_CF` ("metadata"): Stores overall SMT metadata such as the current root hash, total
///   leaf count, and total entry count.
#[derive(Debug, Clone)]
pub struct RocksDbStorage {
    // Shared, cheaply-clonable handle to the underlying RocksDB instance.
    db: Arc<DB>,
}

impl RocksDbStorage {
    /// Opens or creates a RocksDB database at the specified `path` and configures it for SMT
    /// storage.
    ///
    /// This method sets up the necessary column families (`leaves`, `subtrees`, `metadata`)
    /// and applies various RocksDB options for performance, such as caching, bloom filters,
    /// and compaction strategies tailored for SMT workloads.
    ///
    /// # Errors
    /// Returns `StorageError::Backend` if the database cannot be opened or configured,
    /// for example, due to path issues, permissions, or RocksDB internal errors.
    pub fn open(config: RocksDbConfig) -> Result<Self, StorageError> {
        // Base DB options
        let mut db_opts = Options::default();
        // Create DB if it doesn't exist
        db_opts.create_if_missing(true);
        // Auto-create missing column families
        db_opts.create_missing_column_families(true);
        #[expect(clippy::cast_possible_wrap)]
        // Tune compaction threads to match CPU cores
        db_opts.increase_parallelism(rayon::current_num_threads() as i32);
        // Limit the number of open file handles
        db_opts.set_max_open_files(config.max_open_files);
        #[expect(clippy::cast_possible_wrap)]
        // Parallelize flush/compaction up to CPU count
        db_opts.set_max_background_jobs(rayon::current_num_threads() as i32);
        // Maximum total WAL size: 512 MB
        db_opts.set_max_total_wal_size(512 * 1024 * 1024);

        // Shared block cache across all column families
        let cache = Cache::new_lru_cache(config.cache_size);

        // Common table options for bloom filtering and cache
        let mut table_opts = BlockBasedOptions::default();
        // Use shared LRU cache for block data
        table_opts.set_block_cache(&cache);
        // 10 bits/key bloom filter; `false` selects a full (non-block-based) filter
        table_opts.set_bloom_filter(10.0, false);
        // Enable whole-key bloom filtering (better with point lookups)
        table_opts.set_whole_key_filtering(true);
        // Pin L0 filter and index blocks in cache (improves performance)
        table_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);

        // Column family for leaves
        let mut leaves_opts = Options::default();
        leaves_opts.set_block_based_table_factory(&table_opts);
        // 128 MB memtable
        leaves_opts.set_write_buffer_size(128 << 20);
        // Allow up to 3 memtables
        leaves_opts.set_max_write_buffer_number(3);
        leaves_opts.set_min_write_buffer_number_to_merge(1);
        // Do not retain flushed memtables in memory
        leaves_opts.set_max_write_buffer_size_to_maintain(0);
        // Use level-based compaction
        leaves_opts.set_compaction_style(DBCompactionStyle::Level);
        // 512 MB target file size
        leaves_opts.set_target_file_size_base(512 << 20);
        leaves_opts.set_target_file_size_multiplier(2);
        // LZ4 compression
        leaves_opts.set_compression_type(DBCompressionType::Lz4);
        // Set level-based compaction parameters
        leaves_opts.set_level_zero_file_num_compaction_trigger(8);

        // Helper to build per-depth subtree CF options with a tuned bloom filter size
        #[expect(clippy::items_after_statements)]
        fn subtree_cf(cache: &Cache, bloom_filter_bits: f64) -> Options {
            let mut tbl = BlockBasedOptions::default();
            // Use shared LRU cache for block data
            tbl.set_block_cache(cache);
            // Set bloom filter for subtree lookups
            tbl.set_bloom_filter(bloom_filter_bits, false);
            // Enable whole-key bloom filtering
            tbl.set_whole_key_filtering(true);
            // Pin L0 filter and index blocks in cache
            tbl.set_pin_l0_filter_and_index_blocks_in_cache(true);

            let mut opts = Options::default();
            opts.set_block_based_table_factory(&tbl);
            // 128 MB memtable
            opts.set_write_buffer_size(128 << 20);
            opts.set_max_write_buffer_number(3);
            opts.set_min_write_buffer_number_to_merge(1);
            // Do not retain flushed memtables in memory
            opts.set_max_write_buffer_size_to_maintain(0);
            // Use level-based compaction
            opts.set_compaction_style(DBCompactionStyle::Level);
            // 512 MB target file size
            opts.set_target_file_size_base(512 << 20);
            opts.set_target_file_size_multiplier(2);
            // LZ4 compression
            opts.set_compression_type(DBCompressionType::Lz4);
            // Set level-based compaction parameters
            opts.set_level_zero_file_num_compaction_trigger(8);
            opts
        }

        // Depth-24 root-hash cache CF: small values, LZ4 + shared table options
        let mut depth24_opts = Options::default();
        depth24_opts.set_compression_type(DBCompressionType::Lz4);
        depth24_opts.set_block_based_table_factory(&table_opts);

        // Metadata CF with no compression
        let mut metadata_opts = Options::default();
        metadata_opts.set_compression_type(DBCompressionType::None);

        // Define column families with tailored options
        let cfs = vec![
            ColumnFamilyDescriptor::new(LEAVES_CF, leaves_opts),
            ColumnFamilyDescriptor::new(SUBTREE_24_CF, subtree_cf(&cache, 8.0)),
            ColumnFamilyDescriptor::new(SUBTREE_32_CF, subtree_cf(&cache, 10.0)),
            ColumnFamilyDescriptor::new(SUBTREE_40_CF, subtree_cf(&cache, 10.0)),
            ColumnFamilyDescriptor::new(SUBTREE_48_CF, subtree_cf(&cache, 12.0)),
            ColumnFamilyDescriptor::new(SUBTREE_56_CF, subtree_cf(&cache, 12.0)),
            ColumnFamilyDescriptor::new(METADATA_CF, metadata_opts),
            ColumnFamilyDescriptor::new(DEPTH_24_CF, depth24_opts),
        ];

        // Open the database with our tuned CFs
        let db = DB::open_cf_descriptors(&db_opts, config.path, cfs).map_err(map_rocksdb_err)?;

        Ok(Self { db: Arc::new(db) })
    }

    /// Syncs the RocksDB database to disk.
    ///
    /// This ensures that all data is persisted to disk.
    ///
    /// # Errors
    /// - Returns `StorageError::Backend` if the flush operation fails.
    fn sync(&self) -> Result<(), StorageError> {
        let mut fopts = FlushOptions::default();
        // Block until each flush completes so data is durable on return.
        fopts.set_wait(true);

        // Flush every column family used by the SMT storage.
        for name in [
            LEAVES_CF,
            SUBTREE_24_CF,
            SUBTREE_32_CF,
            SUBTREE_40_CF,
            SUBTREE_48_CF,
            SUBTREE_56_CF,
            METADATA_CF,
            DEPTH_24_CF,
        ] {
            let cf = self.cf_handle(name)?;
            self.db.flush_cf_opt(cf, &fopts).map_err(map_rocksdb_err)?;
        }

        // Finally sync the write-ahead log to disk as well.
        self.db.flush_wal(true).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Converts an index (u64) into a fixed-size byte array for use as a `RocksDB` key.
    /// Big-endian encoding keeps lexicographic key order equal to numeric order.
    #[inline(always)]
    fn index_db_key(index: u64) -> [u8; 8] {
        index.to_be_bytes()
    }

    /// Converts a `NodeIndex` (for a subtree root) into a `KeyBytes` for use as a `RocksDB` key.
    /// The `KeyBytes` is a wrapper around a 8-byte value with a variable-length prefix.
    ///
    /// # Panics
    /// Panics if the index depth is not one of the supported subtree depths
    /// (24, 32, 40, 48, 56).
    #[inline(always)]
    fn subtree_db_key(index: NodeIndex) -> KeyBytes {
        // Number of key bytes to keep: one byte per 8 levels of depth.
        let keep = match index.depth() {
            24 => 3,
            32 => 4,
            40 => 5,
            48 => 6,
            56 => 7,
            d => panic!("unsupported depth {d}"),
        };
        KeyBytes::new(index.value(), keep)
    }

    /// Retrieves a handle to a `RocksDB` column family by its name.
    ///
    /// # Errors
    /// Returns `StorageError::Unsupported` if the column family with the given `name` does not
    /// exist.
    fn cf_handle(&self, name: &str) -> Result<&rocksdb::ColumnFamily, StorageError> {
        self.db
            .cf_handle(name)
            .ok_or_else(|| StorageError::Unsupported(format!("unknown column family `{name}`")))
    }

    /* helper: CF handle from NodeIndex ------------------------------------- */
    /// Resolves the subtree column family for `index` by its depth.
    /// Panics (rather than erroring) if the CF is missing, since all subtree CFs
    /// are created in `open`.
    #[inline(always)]
    fn subtree_cf(&self, index: NodeIndex) -> &rocksdb::ColumnFamily {
        let name = cf_for_depth(index.depth());
        self.cf_handle(name).expect("CF handle missing")
    }
}

impl SmtStorage for RocksDbStorage {
    /// Retrieves the SMT root hash from the `METADATA_CF` column family.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If the metadata column family is missing;
    ///   `StorageError::Backend` if a RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If the retrieved root hash bytes cannot be
    ///   deserialized.
    fn get_root(&self) -> Result<Option<Word>, StorageError> {
        let cf = self.cf_handle(METADATA_CF)?;
        match self.db.get_cf(cf, ROOT_KEY).map_err(map_rocksdb_err)? {
            Some(bytes) => {
                let digest = Word::read_from_bytes(&bytes)?;
                Ok(Some(digest))
            },
            None => Ok(None),
        }
    }

    /// Stores the SMT root hash in the `METADATA_CF` column family.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If the metadata column family is missing;
    ///   `StorageError::Backend` if a RocksDB error occurs.
    fn set_root(&self, root: Word) -> Result<(), StorageError> {
        let cf = self.cf_handle(METADATA_CF)?;
        self.db.put_cf(cf, ROOT_KEY, root.to_bytes()).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Retrieves the total count of non-empty leaves from the `METADATA_CF` column family.
    /// Returns 0 if the count is not found.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If the metadata column family is missing;
    ///   `StorageError::Backend` if a RocksDB error occurs.
    /// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid.
    fn leaf_count(&self) -> Result<usize, StorageError> {
        let cf = self.cf_handle(METADATA_CF)?;
        self.db
            .get_cf(cf, LEAF_COUNT_KEY)
            .map_err(map_rocksdb_err)?
            .map_or(Ok(0), |bytes| {
                // The count is stored as exactly 8 big-endian bytes.
                let arr: [u8; 8] =
                    bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen {
                        what: "leaf count",
                        expected: 8,
                        found: bytes.len(),
                    })?;
                Ok(usize::from_be_bytes(arr))
            })
    }

    /// Retrieves the total count of key-value entries from the `METADATA_CF` column family.
    /// Returns 0 if the count is not found.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If the metadata column family is missing;
    ///   `StorageError::Backend` if a RocksDB error occurs.
    /// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid.
    fn entry_count(&self) -> Result<usize, StorageError> {
        let cf = self.cf_handle(METADATA_CF)?;
        self.db
            .get_cf(cf, ENTRY_COUNT_KEY)
            .map_err(map_rocksdb_err)?
            .map_or(Ok(0), |bytes| {
                // The count is stored as exactly 8 big-endian bytes.
                let arr: [u8; 8] =
                    bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen {
                        what: "entry count",
                        expected: 8,
                        found: bytes.len(),
                    })?;
                Ok(usize::from_be_bytes(arr))
            })
    }

    /// Inserts a key-value pair into the SMT leaf at the specified logical `index`.
    ///
    /// This operation involves:
    /// 1. Retrieving the current leaf (if any) at `index`.
    /// 2. Inserting the new key-value pair into the leaf.
    /// 3. Updating the leaf and entry counts in the metadata column family.
    /// 4. Writing all changes (leaf data, counts) to RocksDB in a single batch.
    ///
    /// Note: This only updates the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`/`StorageError::Backend`: If column families are missing or a
    ///   RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If existing leaf data is corrupt.
    #[expect(clippy::single_match_else)]
    fn insert_value(
        &self,
        index: u64,
        key: Word,
        value: Word,
    ) -> Result<Option<Word>, StorageError> {
        // Inserting an empty value would be a logic error upstream (use remove instead).
        debug_assert_ne!(value, EMPTY_WORD);

        let mut batch = WriteBatch::default();

        // Fetch initial counts.
        let mut current_leaf_count = self.leaf_count()?;
        let mut current_entry_count = self.entry_count()?;

        let leaves_cf = self.cf_handle(LEAVES_CF)?;
        let db_key = Self::index_db_key(index);

        let maybe_leaf = self.get_leaf(index)?;

        let value_to_return: Option<Word> = match maybe_leaf {
            Some(mut existing_leaf) => {
                let old_value = insert_into_leaf(&mut existing_leaf, key, value)?;
                // Determine if the overall SMT entry_count needs to change.
                // entry_count increases if:
                // 1. The key was not present in this leaf before (`old_value` is `None`).
                // 2. The key was present but held `EMPTY_WORD` (`old_value` is
                //    `Some(EMPTY_WORD)`).
                if old_value.is_none_or(|old_v| old_v == EMPTY_WORD) {
                    current_entry_count += 1;
                }
                // current_leaf_count does not change because the leaf itself already existed.
                batch.put_cf(leaves_cf, db_key, existing_leaf.to_bytes());
                old_value
            },
            None => {
                // Leaf at `index` does not exist, so create a new one.
                let new_leaf = SmtLeaf::Single((key, value));
                // A new leaf is created.
                current_leaf_count += 1;
                // This new leaf contains one new SMT entry.
                current_entry_count += 1;
                batch.put_cf(leaves_cf, db_key, new_leaf.to_bytes());
                // No previous value, as the leaf (and thus the key in it) was new.
                None
            },
        };

        // Add updated metadata counts to the batch.
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        batch.put_cf(metadata_cf, LEAF_COUNT_KEY, current_leaf_count.to_be_bytes());
        batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, current_entry_count.to_be_bytes());

        // Atomically write all changes (leaf data and metadata counts).
        self.db.write(batch).map_err(map_rocksdb_err)?;

        Ok(value_to_return)
    }

    /// Removes a key-value pair from the SMT leaf at the specified logical `index`.
    ///
    /// This operation involves:
    /// 1. Retrieving the leaf at `index`.
    /// 2. Removing the `key` from the leaf. If the leaf becomes empty, it's deleted from RocksDB.
    /// 3. Updating the leaf and entry counts in the metadata column family.
    /// 4. Writing all changes (leaf data/deletion, counts) to RocksDB in a single batch.
    ///
    /// Returns `Ok(None)` if the leaf at `index` does not exist or the `key` is not found.
    ///
    /// Note: This only updates the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`/`StorageError::Backend`: If column families are missing or a
    ///   RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If existing leaf data is corrupt.
    fn remove_value(&self, index: u64, key: Word) -> Result<Option<Word>, StorageError> {
        // No leaf at this index means there is nothing to remove.
        let Some(mut leaf) = self.get_leaf(index)? else {
            return Ok(None);
        };

        let mut batch = WriteBatch::default();
        let cf = self.cf_handle(LEAVES_CF)?;
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        let db_key = Self::index_db_key(index);
        let mut entry_count = self.entry_count()?;
        let mut leaf_count = self.leaf_count()?;

        let (current_value, is_empty) = remove_from_leaf(&mut leaf, key);
        // Only a real (non-empty) previous value reduces the global entry count.
        if let Some(current_value) = current_value
            && current_value != EMPTY_WORD
        {
            entry_count -= 1;
        }
        if is_empty {
            // The leaf holds no more entries: delete it and decrement the leaf count.
            leaf_count -= 1;
            batch.delete_cf(cf, db_key);
        } else {
            // The leaf still has entries: persist its updated serialization.
            batch.put_cf(cf, db_key, leaf.to_bytes());
        }
        batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes());
        batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes());
        // Atomically apply leaf change and metadata counts.
        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(current_value)
    }

    /// Retrieves a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
    /// - `StorageError::DeserializationError`: If the retrieved leaf data is corrupt.
    fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
        let cf = self.cf_handle(LEAVES_CF)?;
        let key = Self::index_db_key(index);
        match self.db.get_cf(cf, key).map_err(map_rocksdb_err)? {
            Some(bytes) => {
                let leaf = SmtLeaf::read_from_bytes(&bytes)?;
                Ok(Some(leaf))
            },
            None => Ok(None),
        }
    }

    /// Sets or updates multiple SMT leaf nodes in the `LEAVES_CF` column family.
    ///
    /// This method performs a batch write to RocksDB. It also updates the global
    /// leaf and entry counts in the `METADATA_CF` based on the provided `leaves` map,
    /// overwriting any previous counts.
+ /// + /// Note: This method assumes the provided `leaves` map represents the entirety + /// of leaves to be stored or that counts are being explicitly reset. + /// Note: This only updates the leaves. Callers are responsible for recomputing and + /// persisting the corresponding inner nodes. + /// + /// # Errors + /// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs. + fn set_leaves(&self, leaves: Map) -> Result<(), StorageError> { + let cf = self.cf_handle(LEAVES_CF)?; + let leaf_count: usize = leaves.len(); + let entry_count: usize = leaves.values().map(|leaf| leaf.entries().len()).sum(); + let mut batch = WriteBatch::default(); + for (idx, leaf) in leaves { + let key = Self::index_db_key(idx); + let value = leaf.to_bytes(); + batch.put_cf(cf, key, &value); + } + let metadata_cf = self.cf_handle(METADATA_CF)?; + batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes()); + batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes()); + self.db.write(batch).map_err(map_rocksdb_err)?; + Ok(()) + } + + /// Removes a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family. + /// + /// Important: This method currently *does not* update the global leaf and entry counts + /// in the metadata. Callers are responsible for managing these counts separately + /// if using this method directly, or preferably use `apply` or `remove_value` which handle + /// counts. + /// + /// Note: This only removes the leaf. Callers are responsible for recomputing and + /// persisting the corresponding inner nodes. + /// + /// # Errors + /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs. + /// - `StorageError::DeserializationError`: If the retrieved (to be returned) leaf data is + /// corrupt. 
+ fn remove_leaf(&self, index: u64) -> Result, StorageError> { + let key = Self::index_db_key(index); + let cf = self.cf_handle(LEAVES_CF)?; + let old_bytes = self.db.get_cf(cf, key).map_err(map_rocksdb_err)?; + self.db.delete_cf(cf, key).map_err(map_rocksdb_err)?; + Ok(old_bytes + .map(|bytes| SmtLeaf::read_from_bytes(&bytes).expect("failed to deserialize leaf"))) + } + + /// Retrieves multiple SMT leaf nodes by their logical `indices` using RocksDB's `multi_get_cf`. + /// + /// # Errors + /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs. + /// - `StorageError::DeserializationError`: If any retrieved leaf data is corrupt. + fn get_leaves(&self, indices: &[u64]) -> Result>, StorageError> { + let cf = self.cf_handle(LEAVES_CF)?; + let db_keys: Vec<[u8; 8]> = indices.iter().map(|&idx| Self::index_db_key(idx)).collect(); + let results = self.db.multi_get_cf(db_keys.iter().map(|k| (cf, k.as_ref()))); + + results + .into_iter() + .map(|result| match result { + Ok(Some(bytes)) => Ok(Some(SmtLeaf::read_from_bytes(&bytes)?)), + Ok(None) => Ok(None), + Err(e) => Err(map_rocksdb_err(e)), + }) + .collect() + } + + /// Returns true if the storage has any leaves. + /// + /// # Errors + /// Returns `StorageError` if the storage read operation fails. + fn has_leaves(&self) -> Result { + Ok(self.leaf_count()? > 0) + } + + /// Batch-retrieves multiple subtrees from RocksDB by their node indices. + /// + /// This method groups requests by subtree depth into column family buckets, + /// then performs parallel `multi_get` operations to efficiently retrieve + /// all subtrees. Results are deserialized and placed in the same order as + /// the input indices. + /// + /// Note: Retrieval is performed in parallel. If multiple errors occur (e.g., + /// deserialization or backend errors), only the first one encountered is returned. + /// Other errors will be discarded. 
    ///
    /// # Parameters
    /// - `index`: The root `NodeIndex` of the subtree to retrieve.
    ///
    /// # Returns
    /// - `Ok(Some(subtree))` if a subtree is stored at `index`.
    /// - `Ok(None)` if no subtree exists at `index`.
    /// - `Err(StorageError)` if the RocksDB access or deserialization fails.
    fn get_subtree(&self, index: NodeIndex) -> Result<Option<Subtree>, StorageError> {
        // Pick the column family matching the subtree's root depth.
        let cf = self.subtree_cf(index);
        let key = Self::subtree_db_key(index);
        match self.db.get_cf(cf, key).map_err(map_rocksdb_err)? {
            Some(bytes) => {
                let subtree = Subtree::from_vec(index, &bytes)?;
                Ok(Some(subtree))
            },
            None => Ok(None),
        }
    }

    /// Batch-retrieves multiple subtrees from RocksDB by their node indices.
    ///
    /// This method groups requests by subtree depth into column family buckets,
    /// then performs parallel `multi_get` operations to efficiently retrieve
    /// all subtrees. Results are deserialized and placed in the same order as
    /// the input indices.
    ///
    /// # Parameters
    /// - `indices`: A slice of subtree root indices to retrieve.
    ///
    /// # Returns
    /// - A `Vec<Option<Subtree>>` where each index corresponds to the original input.
    /// - `Ok(...)` if all fetches succeed.
    /// - `Err(StorageError)` if any RocksDB access or deserialization fails.
    fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError> {
        use rayon::prelude::*;

        // One bucket per supported depth; each entry remembers its position in `indices`.
        let mut depth_buckets: [Vec<(usize, NodeIndex)>; 5] = Default::default();

        for (original_index, &node_index) in indices.iter().enumerate() {
            let depth = node_index.depth();
            let bucket_index = match depth {
                56 => 0,
                48 => 1,
                40 => 2,
                32 => 3,
                24 => 4,
                _ => {
                    return Err(StorageError::Unsupported(format!(
                        "unsupported subtree depth {depth}"
                    )));
                },
            };
            depth_buckets[bucket_index].push((original_index, node_index));
        }
        let mut results = vec![None; indices.len()];

        // Process depth buckets in parallel.
        // Note: if multiple buckets fail, `collect` surfaces only the first error.
        let bucket_results: Result<Vec<_>, StorageError> = depth_buckets
            .into_par_iter()
            .enumerate()
            .filter(|(_, bucket)| !bucket.is_empty())
            .map(
                |(bucket_index, bucket)| -> Result<Vec<(usize, Option<Subtree>)>, StorageError> {
                    // NOTE(review): assumes SUBTREE_DEPTHS[0] == 56 down to SUBTREE_DEPTHS[4]
                    // == 24, matching the bucket order above — confirm against its definition.
                    let depth = SUBTREE_DEPTHS[bucket_index];
                    let cf = self.cf_handle(cf_for_depth(depth))?;
                    let keys: Vec<_> =
                        bucket.iter().map(|(_, idx)| Self::subtree_db_key(*idx)).collect();

                    let db_results = self.db.multi_get_cf(keys.iter().map(|k| (cf, k.as_ref())));

                    // Process results for this bucket
                    bucket
                        .into_iter()
                        .zip(db_results)
                        .map(|((original_index, node_index), db_result)| {
                            let subtree = match db_result {
                                Ok(Some(bytes)) => Some(Subtree::from_vec(node_index, &bytes)?),
                                Ok(None) => None,
                                Err(e) => return Err(map_rocksdb_err(e)),
                            };
                            Ok((original_index, subtree))
                        })
                        .collect()
                },
            )
            .collect();

        // Flatten results and place them in correct positions
        for bucket_result in bucket_results? {
            for (original_index, subtree) in bucket_result {
                results[original_index] = subtree;
            }
        }

        Ok(results)
    }

    /// Stores a single subtree in RocksDB and optionally updates the depth-24 root cache.
    ///
    /// The subtree is serialized and written to its corresponding column family.
    /// If it's a depth-24 subtree, the root node's hash is also stored in the
    /// dedicated `DEPTH_24_CF` cache to support top-level reconstruction.
    ///
    /// # Parameters
    /// - `subtree`: A reference to the subtree to be stored.
    ///
    /// # Errors
    /// - Returns `StorageError` if column family lookup, serialization, or the write operation
    ///   fails.
    fn set_subtree(&self, subtree: &Subtree) -> Result<(), StorageError> {
        let subtrees_cf = self.subtree_cf(subtree.root_index());
        let mut batch = WriteBatch::default();

        let key = Self::subtree_db_key(subtree.root_index());
        let value = subtree.to_vec();
        batch.put_cf(subtrees_cf, key, value);

        // Also update level 24 hash cache if this is a level 24 subtree
        if subtree.root_index().depth() == IN_MEMORY_DEPTH {
            let root_hash = subtree
                .get_inner_node(subtree.root_index())
                .ok_or_else(|| StorageError::Unsupported("Subtree root node not found".into()))?
                .hash();

            let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
            let hash_key = Self::index_db_key(subtree.root_index().value());
            batch.put_cf(depth24_cf, hash_key, root_hash.to_bytes());
        }

        // Subtree and depth-24 cache entry are committed atomically.
        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Bulk-writes a batch of subtrees to storage in a single atomic write.
    ///
    /// Each serialized `Subtree` is written to the column family matching its root depth;
    /// depth-24 subtree root hashes are additionally cached in `DEPTH_24_CF`.
    ///
    /// NOTE(review): an earlier doc claimed this bypasses the WAL; the current implementation
    /// uses a plain `WriteBatch` write, so the WAL is still in effect — confirm the intended
    /// durability trade-off for initial SMT construction.
    ///
    /// # Parameters
    /// - `subtrees`: A vector of `Subtree` objects to be serialized and persisted.
    ///
    /// # Errors
    /// - Returns `StorageError::Backend` if any column family lookup or RocksDB write fails.
    fn set_subtrees(&self, subtrees: Vec<Subtree>) -> Result<(), StorageError> {
        let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
        let mut batch = WriteBatch::default();

        for subtree in subtrees {
            // Route each subtree to the CF for its root depth.
            let subtrees_cf = self.subtree_cf(subtree.root_index());
            let key = Self::subtree_db_key(subtree.root_index());
            let value = subtree.to_vec();
            batch.put_cf(subtrees_cf, key, value);

            // Cache the root hash of depth-24 subtrees for fast top-level rebuilds.
            if subtree.root_index().depth() == IN_MEMORY_DEPTH
                && let Some(root_node) = subtree.get_inner_node(subtree.root_index())
            {
                let hash_key = Self::index_db_key(subtree.root_index().value());
                batch.put_cf(depth24_cf, hash_key, root_node.hash().to_bytes());
            }
        }

        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Removes a single SMT Subtree from storage, identified by its root `NodeIndex`.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the subtrees column family is missing or a RocksDB error
    ///   occurs.
    fn remove_subtree(&self, index: NodeIndex) -> Result<(), StorageError> {
        let subtrees_cf = self.subtree_cf(index);
        let mut batch = WriteBatch::default();

        let key = Self::subtree_db_key(index);
        batch.delete_cf(subtrees_cf, key);

        // Also remove level 24 hash cache if this is a level 24 subtree
        if index.depth() == IN_MEMORY_DEPTH {
            let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
            let hash_key = Self::index_db_key(index.value());
            batch.delete_cf(depth24_cf, hash_key);
        }

        self.db.write(batch).map_err(map_rocksdb_err)?;
        Ok(())
    }

    /// Retrieves a single inner node (non-leaf node) from within a Subtree.
    ///
    /// This method is intended for accessing nodes at depths greater than or equal to
    /// `IN_MEMORY_DEPTH`. It first finds the appropriate Subtree containing the `index`, then
    /// delegates to `Subtree::get_inner_node()`.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`;
    ///   `StorageError::Backend` if RocksDB errors occur.
    /// - `StorageError::Value`: If the containing Subtree data is corrupt.
    fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
        // Upper-tree nodes are kept in memory elsewhere; storage only serves deep nodes.
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot get inner node from upper part of the tree".into(),
            ));
        }
        let subtree_root_index = Subtree::find_subtree_root(index);
        Ok(self
            .get_subtree(subtree_root_index)?
            .and_then(|subtree| subtree.get_inner_node(index)))
    }

    /// Sets or updates a single inner node (non-leaf node) within a Subtree.
    ///
    /// This method is intended for `index.depth() >= IN_MEMORY_DEPTH`.
    /// If the target Subtree does not exist, it is created. The `node` is then
    /// inserted into the Subtree, and the modified Subtree is written back to storage.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`;
    ///   `StorageError::Backend` if RocksDB errors occur.
    /// - `StorageError::Value`: If existing Subtree data is corrupt.
    fn set_inner_node(
        &self,
        index: NodeIndex,
        node: InnerNode,
    ) -> Result<Option<InnerNode>, StorageError> {
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot set inner node in upper part of the tree".into(),
            ));
        }

        // Load (or create) the subtree that owns this node, update it, and write it back.
        let subtree_root_index = Subtree::find_subtree_root(index);
        let mut subtree = self
            .get_subtree(subtree_root_index)?
            .unwrap_or_else(|| Subtree::new(subtree_root_index));
        let old_node = subtree.insert_inner_node(index, node);
        self.set_subtree(&subtree)?;
        Ok(old_node)
    }

    /// Removes a single inner node (non-leaf node) from within a Subtree.
    ///
    /// This method is intended for `index.depth() >= IN_MEMORY_DEPTH`.
    /// If the Subtree becomes empty after removing the node, the Subtree itself
    /// is removed from storage.
    ///
    /// # Errors
    /// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`;
    ///   `StorageError::Backend` if RocksDB errors occur.
    /// - `StorageError::Value`: If existing Subtree data is corrupt.
    fn remove_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
        // Upper-tree nodes are not stored here; reject out-of-range requests.
        if index.depth() < IN_MEMORY_DEPTH {
            return Err(StorageError::Unsupported(
                "Cannot remove inner node from upper part of the tree".into(),
            ));
        }

        let subtree_root_index = Subtree::find_subtree_root(index);
        self.get_subtree(subtree_root_index)
            .and_then(|maybe_subtree| match maybe_subtree {
                Some(mut subtree) => {
                    let old_node = subtree.remove_inner_node(index);
                    // Drop the whole subtree record when it no longer holds any nodes.
                    let db_operation_result = if subtree.is_empty() {
                        self.remove_subtree(subtree_root_index)
                    } else {
                        self.set_subtree(&subtree)
                    };
                    db_operation_result.map(|_| old_node)
                },
                None => Ok(None),
            })
    }

    /// Applies a batch of `StorageUpdates` atomically to the RocksDB backend.
    ///
    /// This is the primary method for persisting changes to the SMT. It constructs a single
    /// RocksDB `WriteBatch` containing all specified changes:
    /// - Leaf updates/deletions in `LEAVES_CF`.
    /// - Subtree updates/deletions in `SUBTREE_24_CF`, `SUBTREE_32_CF`, `SUBTREE_40_CF`,
    ///   `SUBTREE_48_CF`, `SUBTREE_56_CF`.
    /// - Updates to leaf and entry counts in `METADATA_CF` based on `leaf_count_delta` and
    ///   `entry_count_delta`.
    /// - Sets the new SMT root in `METADATA_CF`.
    ///
    /// All operations in the batch are applied atomically by RocksDB.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If any column family is missing or a RocksDB write error occurs.
    fn apply(&self, updates: StorageUpdates) -> Result<(), StorageError> {
        use rayon::prelude::*;

        let mut batch = WriteBatch::default();

        let leaves_cf = self.cf_handle(LEAVES_CF)?;
        let metadata_cf = self.cf_handle(METADATA_CF)?;
        let depth24_cf = self.cf_handle(DEPTH_24_CF)?;

        let StorageUpdateParts {
            leaf_updates,
            subtree_updates,
            new_root,
            leaf_count_delta,
            entry_count_delta,
        } = updates.into_parts();

        // Process leaf updates: `Some` = upsert, `None` = delete.
        for (index, maybe_leaf) in leaf_updates {
            let key = Self::index_db_key(index);
            match maybe_leaf {
                Some(leaf) => batch.put_cf(leaves_cf, key, leaf.to_bytes()),
                None => batch.delete_cf(leaves_cf, key),
            }
        }

        // Helper for depth 24 operations
        let is_depth_24 = |index: NodeIndex| index.depth() == IN_MEMORY_DEPTH;

        // Parallel preparation of subtree operations (serialization is the expensive part);
        // the actual WriteBatch mutation stays sequential below.
        let subtree_ops: Result<Vec<_>, StorageError> = subtree_updates
            .into_par_iter()
            .map(|update| -> Result<_, StorageError> {
                // `depth24_op` is `Some((key, Some(bytes)))` for a depth-24 cache put,
                // `Some((key, None))` for a depth-24 cache delete, `None` otherwise.
                let (index, maybe_bytes, depth24_op) = match update {
                    SubtreeUpdate::Store { index, subtree } => {
                        let bytes = subtree.to_vec();
                        let depth24_op = is_depth_24(index)
                            .then(|| subtree.get_inner_node(index))
                            .flatten()
                            .map(|root_node| {
                                let hash_key = Self::index_db_key(index.value());
                                (hash_key, Some(root_node.hash().to_bytes()))
                            });
                        (index, Some(bytes), depth24_op)
                    },
                    SubtreeUpdate::Delete { index } => {
                        let depth24_op = is_depth_24(index).then(|| {
                            let hash_key = Self::index_db_key(index.value());
                            (hash_key, None)
                        });
                        (index, None, depth24_op)
                    },
                };

                let key = Self::subtree_db_key(index);
                let subtrees_cf = self.subtree_cf(index);

                Ok((subtrees_cf, key, maybe_bytes, depth24_op))
            })
            .collect();

        // Sequential batch building
        for (subtrees_cf, key, maybe_bytes, depth24_op) in subtree_ops? {
            match maybe_bytes {
                Some(bytes) => batch.put_cf(subtrees_cf, key, bytes),
                None => batch.delete_cf(subtrees_cf, key),
            }

            if let Some((hash_key, maybe_hash_bytes)) = depth24_op {
                match maybe_hash_bytes {
                    Some(hash_bytes) => batch.put_cf(depth24_cf, hash_key, hash_bytes),
                    None => batch.delete_cf(depth24_cf, hash_key),
                }
            }
        }

        // Only touch the counts when the update actually changes them; saturating
        // arithmetic guards against underflow from inconsistent deltas.
        if leaf_count_delta != 0 || entry_count_delta != 0 {
            let current_leaf_count = self.leaf_count()?;
            let current_entry_count = self.entry_count()?;

            let new_leaf_count = current_leaf_count.saturating_add_signed(leaf_count_delta);
            let new_entry_count = current_entry_count.saturating_add_signed(entry_count_delta);

            batch.put_cf(metadata_cf, LEAF_COUNT_KEY, new_leaf_count.to_be_bytes());
            batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, new_entry_count.to_be_bytes());
        }

        batch.put_cf(metadata_cf, ROOT_KEY, new_root.to_bytes());

        let mut write_opts = rocksdb::WriteOptions::default();
        // Skip the immediate fsync of the WAL for better performance; the WAL itself
        // is still written, so durability is deferred, not disabled.
        write_opts.set_sync(false);
        self.db.write_opt(batch, &write_opts).map_err(map_rocksdb_err)?;

        Ok(())
    }

    /// Returns an iterator over all (logical u64 index, `SmtLeaf`) pairs in the `LEAVES_CF`.
    ///
    /// The iterator uses a RocksDB snapshot for consistency and iterates in lexicographical
    /// order of the keys (leaf indices). Errors during iteration (e.g., deserialization issues)
    /// cause the iterator to skip the problematic item and attempt to continue.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs
    ///   during iterator creation.
    fn iter_leaves(&self) -> Result<Box<dyn Iterator<Item = (u64, SmtLeaf)> + '_>, StorageError> {
        let cf = self.cf_handle(LEAVES_CF)?;
        let mut read_opts = ReadOptions::default();
        // Force a full ordered scan (ignore any prefix-seek optimizations).
        read_opts.set_total_order_seek(true);
        let db_iter = self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start);

        Ok(Box::new(RocksDbDirectLeafIterator { iter: db_iter }))
    }

    /// Returns an iterator over all `Subtree` instances across all subtree column families.
    ///
    /// The iterator uses a RocksDB snapshot and iterates in lexicographical order of keys
    /// (subtree root NodeIndex) across all depth column families (24, 32, 40, 48, 56).
    /// Errors during iteration (e.g., deserialization issues) cause the iterator to skip
    /// the problematic item and attempt to continue.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If any subtree column family is missing or a RocksDB error occurs
    ///   during iterator creation.
    fn iter_subtrees(&self) -> Result<Box<dyn Iterator<Item = Subtree> + '_>, StorageError> {
        // All subtree column family names in ascending depth order; the iterator
        // derives each CF's depth from its position in this array.
        const SUBTREE_CFS: [&str; 5] =
            [SUBTREE_24_CF, SUBTREE_32_CF, SUBTREE_40_CF, SUBTREE_48_CF, SUBTREE_56_CF];

        let mut cf_handles = Vec::new();
        for cf_name in SUBTREE_CFS {
            cf_handles.push(self.cf_handle(cf_name)?);
        }

        Ok(Box::new(RocksDbSubtreeIterator::new(&self.db, cf_handles)))
    }

    /// Retrieves all depth 24 hashes for fast tree rebuilding.
    ///
    /// # Errors
    /// - `StorageError::Backend`: If the depth24 column family is missing or a RocksDB error
    ///   occurs.
    /// - `StorageError::Value`: If any hash bytes are corrupt.
    fn get_depth24(&self) -> Result<Vec<(u64, Word)>, StorageError> {
        let cf = self.cf_handle(DEPTH_24_CF)?;
        let iter = self.db.iterator_cf(cf, IteratorMode::Start);
        let mut hashes = Vec::new();

        for item in iter {
            let (key_bytes, value_bytes) = item.map_err(map_rocksdb_err)?;

            // Keys are the big-endian node index; values are serialized root hashes.
            let index = index_from_key_bytes(&key_bytes)?;
            let hash = Word::read_from_bytes(&value_bytes)?;

            hashes.push((index, hash));
        }

        Ok(hashes)
    }
}

/// Syncs the RocksDB database to disk before dropping the storage.
///
/// This ensures that all data is persisted to disk before the storage is dropped.
///
/// # Panics
/// - If the RocksDB sync operation fails.
impl Drop for RocksDbStorage {
    fn drop(&mut self) {
        // NOTE(review): panicking in `drop` aborts if already unwinding — confirm this
        // fail-fast behavior on flush failure is intended.
        if let Err(e) = self.sync() {
            panic!("failed to flush RocksDB on drop: {e}");
        }
    }
}

// ITERATORS
// --------------------------------------------------------------------------------------------

/// An iterator over leaves directly from RocksDB.
///
/// Wraps a `DBIteratorWithThreadMode` and handles deserialization of keys to `u64` (leaf index)
/// and values to `SmtLeaf`. Skips items that fail to deserialize or if a RocksDB error occurs
/// for an item, attempting to continue iteration.
struct RocksDbDirectLeafIterator<'a> {
    iter: DBIteratorWithThreadMode<'a, DB>,
}

impl Iterator for RocksDbDirectLeafIterator<'_> {
    type Item = (u64, SmtLeaf);

    fn next(&mut self) -> Option<Self::Item> {
        // `find_map` keeps advancing past items whose closure yields `None`,
        // so decode failures are skipped rather than terminating iteration.
        self.iter.find_map(|result| {
            let (key_bytes, value_bytes) = result.ok()?;
            let leaf_idx = index_from_key_bytes(&key_bytes).ok()?;
            let leaf = SmtLeaf::read_from_bytes(&value_bytes).ok()?;
            Some((leaf_idx, leaf))
        })
    }
}

/// An iterator over subtrees from multiple RocksDB column families.
///
/// Iterates through all subtree column families (24, 32, 40, 48, 56) sequentially.
/// When one column family is exhausted, it moves to the next one.
+struct RocksDbSubtreeIterator<'a> { + db: &'a DB, + cf_handles: Vec<&'a rocksdb::ColumnFamily>, + current_cf_index: usize, + current_iter: Option>, +} + +impl<'a> RocksDbSubtreeIterator<'a> { + fn new(db: &'a DB, cf_handles: Vec<&'a rocksdb::ColumnFamily>) -> Self { + let mut iterator = Self { + db, + cf_handles, + current_cf_index: 0, + current_iter: None, + }; + iterator.advance_to_next_cf(); + iterator + } + + fn advance_to_next_cf(&mut self) { + if self.current_cf_index < self.cf_handles.len() { + let cf = self.cf_handles[self.current_cf_index]; + let mut read_opts = ReadOptions::default(); + read_opts.set_total_order_seek(true); + self.current_iter = Some(self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start)); + } else { + self.current_iter = None; + } + } + + fn try_next_from_iter( + iter: &mut DBIteratorWithThreadMode, + cf_index: usize, + ) -> Option { + iter.find_map(|result| { + let (key_bytes, value_bytes) = result.ok()?; + let depth = 24 + (cf_index * 8) as u8; + + let node_idx = subtree_root_from_key_bytes(&key_bytes, depth).ok()?; + let value_vec = value_bytes.into_vec(); + Subtree::from_vec(node_idx, &value_vec).ok() + }) + } +} + +impl Iterator for RocksDbSubtreeIterator<'_> { + type Item = Subtree; + + fn next(&mut self) -> Option { + loop { + let iter = self.current_iter.as_mut()?; + + // Try to get the next valid subtree from current iterator + if let Some(subtree) = Self::try_next_from_iter(iter, self.current_cf_index) { + return Some(subtree); + } + + // Current CF exhausted, advance to next + self.current_cf_index += 1; + self.advance_to_next_cf(); + + // If no more CFs, we're done + self.current_iter.as_ref()?; + } + } +} + +// ROCKSDB CONFIGURATION +// -------------------------------------------------------------------------------------------- + +/// Configuration for RocksDB storage used by the Sparse Merkle Tree implementation. 
+/// +/// This struct contains the essential configuration parameters needed to initialize +/// and optimize RocksDB for SMT storage operations. It provides sensible defaults +/// while allowing customization for specific performance requirements. +#[derive(Debug, Clone)] +pub struct RocksDbConfig { + /// The filesystem path where the RocksDB database will be stored. + /// + /// This should be a directory path that the application has read/write permissions for. + /// The database will create multiple files in this directory to store data, logs, and + /// metadata. + pub(crate) path: PathBuf, + + /// The size of the RocksDB block cache in bytes. + /// + /// This cache stores frequently accessed data blocks in memory to improve read performance. + /// Larger cache sizes generally improve read performance but consume more memory. + /// Default: 1GB (1 << 30 bytes) + pub(crate) cache_size: usize, + + /// The maximum number of files that RocksDB can have open simultaneously. + /// + /// This setting affects both memory usage and the number of file descriptors used by the + /// process. Higher values may improve performance for databases with many SST files but + /// increase resource usage. Default: 512 files + pub(crate) max_open_files: i32, +} + +impl RocksDbConfig { + /// Creates a new RocksDbConfig with the given database path and default settings. + /// + /// # Arguments + /// * `path` - The filesystem path where the RocksDB database will be stored. This can be any + /// type that converts into a `PathBuf`. + /// + /// # Default Settings + /// * `cache_size`: 1GB (1,073,741,824 bytes) + /// * `max_open_files`: 512 + /// + /// # Examples + /// ``` + /// use miden_large_smt_backend_rocksdb::RocksDbConfig; + /// + /// let config = RocksDbConfig::new("/path/to/database"); + /// ``` + pub fn new>(path: P) -> Self { + Self { + path: path.into(), + cache_size: 1 << 30, + max_open_files: 512, + } + } + + /// Sets the block cache size for RocksDB. 
+ /// + /// The block cache stores frequently accessed data blocks in memory to improve read + /// performance. Larger cache sizes generally improve read performance but consume more + /// memory. + /// + /// # Arguments + /// * `size` - The cache size in bytes. + /// + /// # Examples + /// ``` + /// use miden_large_smt_backend_rocksdb::RocksDbConfig; + /// + /// let config = RocksDbConfig::new("/path/to/database") + /// .with_cache_size(2 * 1024 * 1024 * 1024); // 2GB cache + /// ``` + #[must_use] + pub fn with_cache_size(mut self, size: usize) -> Self { + self.cache_size = size; + self + } + + /// Sets the maximum number of files that RocksDB can have open simultaneously. + /// + /// This setting affects both memory usage and the number of file descriptors used by the + /// process. Higher values may improve performance for databases with many SST files but + /// increase resource usage. + /// + /// # Arguments + /// * `count` - The maximum number of open files. Must be positive. + /// + /// # Examples + /// ``` + /// use miden_large_smt_backend_rocksdb::RocksDbConfig; + /// + /// let config = RocksDbConfig::new("/path/to/database") + /// .with_max_open_files(1024); // Allow up to 1024 open files + /// ``` + #[must_use] + pub fn with_max_open_files(mut self, count: i32) -> Self { + self.max_open_files = count; + self + } +} + +// SUBTREE DB KEY +// -------------------------------------------------------------------------------------------- + +/// Compact key wrapper for variable-length subtree prefixes. +/// +/// * `bytes` always holds the big-endian 8-byte value. +/// * `len` is how many leading bytes are significant (3-7). 
+#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] +pub(crate) struct KeyBytes { + bytes: [u8; 8], + len: u8, +} + +impl KeyBytes { + #[inline(always)] + pub fn new(value: u64, keep: usize) -> Self { + debug_assert!((3..=7).contains(&keep)); + let bytes = value.to_be_bytes(); + debug_assert!(bytes[..8 - keep].iter().all(|&b| b == 0)); + Self { bytes, len: keep as u8 } + } + + #[inline(always)] + pub fn as_slice(&self) -> &[u8] { + &self.bytes[8 - self.len as usize..] + } +} + +impl AsRef<[u8]> for KeyBytes { + #[inline(always)] + fn as_ref(&self) -> &[u8] { + self.as_slice() + } +} + +// HELPERS +// -------------------------------------------------------------------------------------------- + +/// Deserializes an index (u64) from a RocksDB key byte slice. +/// Expects `key_bytes` to be exactly 8 bytes long. +/// +/// # Errors +/// - `StorageError::BadKeyLen`: If `key_bytes` is not 8 bytes long or conversion fails. +fn index_from_key_bytes(key_bytes: &[u8]) -> Result { + if key_bytes.len() != 8 { + return Err(StorageError::BadKeyLen { expected: 8, found: key_bytes.len() }); + } + let mut arr = [0u8; 8]; + arr.copy_from_slice(key_bytes); + Ok(u64::from_be_bytes(arr)) +} + +/// Reconstructs a `NodeIndex` from the variable-length subtree key stored in `RocksDB`. +/// +/// * `key_bytes` is the big-endian tail of the 64-bit value: +/// - depth 56 → 7 bytes +/// - depth 48 → 6 bytes +/// - depth 40 → 5 bytes +/// - depth 32 → 4 bytes +/// - depth 24 → 3 bytes +/// +/// # Errors +/// * `StorageError::Unsupported` - `depth` is not one of 24/32/40/48/56. +/// * `StorageError::DeserializationError` - `key_bytes.len()` does not match the length required by +/// `depth`. 
+#[inline(always)] +fn subtree_root_from_key_bytes(key_bytes: &[u8], depth: u8) -> Result { + let expected = match depth { + 24 => 3, + 32 => 4, + 40 => 5, + 48 => 6, + 56 => 7, + d => return Err(StorageError::Unsupported(format!("unsupported subtree depth {d}"))), + }; + + if key_bytes.len() != expected { + return Err(StorageError::BadSubtreeKeyLen { depth, expected, found: key_bytes.len() }); + } + let mut buf = [0u8; 8]; + buf[8 - expected..].copy_from_slice(key_bytes); + let value = u64::from_be_bytes(buf); + Ok(NodeIndex::new_unchecked(depth, value)) +} + +/// Helper that maps an SMT depth to its column family. +#[inline(always)] +fn cf_for_depth(depth: u8) -> &'static str { + match depth { + 24 => SUBTREE_24_CF, + 32 => SUBTREE_32_CF, + 40 => SUBTREE_40_CF, + 48 => SUBTREE_48_CF, + 56 => SUBTREE_56_CF, + _ => panic!("unsupported subtree depth: {depth}"), + } +} diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 4fd7ff0017..6110a7a6d4 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -15,28 +15,36 @@ workspace = true [dependencies] anyhow = { workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } -indexmap = { workspace = true } libsqlite3-sys = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["tx-prover"], workspace = true } +miden-standards = { workspace = true } miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tokio-util = { version = "0.7" } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } url = { 
workspace = true } +[build-dependencies] +build-rs = { workspace = true } + [dev-dependencies] miden-node-test-macro = { path = "../test-macro" } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } -miden-standards = { workspace = true } +miden-standards = { features = ["testing"], workspace = true } +rand_chacha = { workspace = true } rstest = { workspace = true } +tempfile = { version = "3.20" } [package.metadata.cargo-machete] ignored = ["libsqlite3-sys"] diff --git a/crates/ntx-builder/build.rs b/crates/ntx-builder/build.rs new file mode 100644 index 0000000000..78883c50c1 --- /dev/null +++ b/crates/ntx-builder/build.rs @@ -0,0 +1,11 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `src/db/migrations.rs` to include the latest version of the migrations into the binary, see +// . + +fn main() { + build_rs::output::rerun_if_changed("src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + build_rs::output::rerun_if_changed("Cargo.toml"); +} diff --git a/crates/ntx-builder/diesel.toml b/crates/ntx-builder/diesel.toml new file mode 100644 index 0000000000..71215dbf76 --- /dev/null +++ b/crates/ntx-builder/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/ntx-builder/src/actor/account_effect.rs b/crates/ntx-builder/src/actor/account_effect.rs new file mode 100644 index 0000000000..7a6acf0058 --- /dev/null +++ b/crates/ntx-builder/src/actor/account_effect.rs @@ -0,0 +1,42 @@ +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta, AccountId}; + +// NETWORK ACCOUNT EFFECT +// ================================================================================================ + +/// Represents the effect of a transaction on a network account. +#[derive(Clone)] +pub enum NetworkAccountEffect { + Created(Account), + Updated(AccountDelta), +} + +impl NetworkAccountEffect { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { + let update = match update { + AccountUpdateDetails::Private => return None, + AccountUpdateDetails::Delta(update) if update.is_full_state() => { + NetworkAccountEffect::Created( + Account::try_from(update) + .expect("Account should be derivable by full state AccountDelta"), + ) + }, + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), + }; + + update.protocol_account_id().is_network().then_some(update) + } + + pub fn network_account_id(&self) -> NetworkAccountId { + // SAFETY: This is a network account by construction. 
+ self.protocol_account_id().try_into().unwrap() + } + + fn protocol_account_id(&self) -> AccountId { + match self { + NetworkAccountEffect::Created(acc) => acc.id(), + NetworkAccountEffect::Updated(delta) => delta.id(), + } + } +} diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index 25020c8b2d..753dfee8a6 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,23 +1,10 @@ -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::num::NonZeroUsize; +use std::sync::Arc; -use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::account::Account; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::{Note, Nullifier}; -use miden_protocol::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; +use miden_protocol::block::BlockHeader; +use miden_protocol::transaction::PartialBlockchain; -use super::ActorShutdownReason; -use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; -use crate::COMPONENT; use crate::actor::inflight_note::InflightNetworkNote; -use crate::builder::ChainState; -use crate::store::{StoreClient, StoreError}; // TRANSACTION CANDIDATE // ================================================================================================ @@ -40,656 +27,7 @@ pub struct TransactionCandidate { pub chain_tip_header: BlockHeader, /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, -} - -// NETWORK ACCOUNT STATE -// ================================================================================================ - -/// The current state of a network account. 
-#[derive(Clone)] -pub struct NetworkAccountState { - /// The network account ID corresponding to the network account this state represents. - account_id: NetworkAccountId, - - /// Component of this state which Contains the committed and inflight account updates as well - /// as available and nullified notes. - account: NetworkAccountNoteState, - - /// Uncommitted transactions which have some impact on the network state. - /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. - inflight_txs: BTreeMap, - - /// Nullifiers of all network notes targeted at this account. - /// - /// Used to filter mempool events: when a `TransactionAdded` event reports consumed nullifiers, - /// only those present in this set are processed (moved from `available_notes` to - /// `nullified_notes`). Nullifiers are added when notes are loaded or created, and removed - /// when the consuming transaction is committed. - known_nullifiers: HashSet, -} - -impl NetworkAccountState { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 30; - - /// Load's all available network notes from the store, along with the required account states. 
- #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load( - account: Account, - account_id: NetworkAccountId, - store: &StoreClient, - block_num: BlockNumber, - ) -> Result { - let notes = store.get_unconsumed_network_notes(account_id, block_num.as_u32()).await?; - let notes = notes - .into_iter() - .map(|note| { - let NetworkNote::SingleTarget(note) = note; - note - }) - .collect::>(); - - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account = NetworkAccountNoteState::new(account, notes); - - let state = Self { - account, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - }; - - state.inject_telemetry(); - - Ok(state) - } - - /// Selects the next candidate network transaction. - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate( - &mut self, - limit: NonZeroUsize, - chain_state: ChainState, - ) -> Option { - // Remove notes that have failed too many times. - self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if self.account.is_empty() { - return None; - } - - // Select notes from the account that can be consumed or are ready for a retry. - let notes = self - .account - .available_notes(&chain_state.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. - if notes.is_empty() { - return None; - } - - let (chain_tip_header, chain_mmr) = chain_state.into_parts(); - TransactionCandidate { - account: self.account.latest_account(), - notes, - chain_tip_header, - chain_mmr, - } - .into() - } - - /// Marks notes of a previously selected candidate as failed. /// - /// Does not remove the candidate from the in-progress pool. 
- #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - self.account.fail_notes(nullifiers.as_slice(), block_num); - } - - /// Updates state with the mempool event. - #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - // Filter network notes relevant to this account. - let network_notes = filter_by_account_id_and_map_to_single_target( - self.account_id, - network_notes.clone(), - ); - self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - let shutdown_reason = self.revert_transaction(*tx); - if shutdown_reason.is_some() { - return shutdown_reason; - } - } - }, - MempoolEvent::BlockCommitted { txs, .. } => { - for tx in txs { - self.commit_transaction(*tx); - } - }, - } - self.inject_telemetry(); - - // No shutdown, continue running actor. - None - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: &[Nullifier], - network_notes: &[SingleTargetNetworkNote], - account_delta: Option<&AccountUpdateDetails>, - ) { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. 
- if self.inflight_txs.contains_key(&id) { - return; - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let account_id = update.network_account_id(); - if account_id == self.account_id { - match update { - NetworkAccountEffect::Updated(account_delta) => { - self.account.add_delta(&account_delta); - tx_impact.account_delta = Some(account_id); - }, - NetworkAccountEffect::Created(_) => {}, - } - } - } - for note in network_notes { - assert_eq!( - note.account_id(), - self.account_id, - "note's account ID does not match network account actor's account ID" - ); - tx_impact.notes.insert(note.nullifier()); - self.known_nullifiers.insert(note.nullifier()); - self.account.add_note(note.clone()); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. - if !self.known_nullifiers.contains(nullifier) { - continue; - } - tx_impact.nullifiers.insert(*nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _ = self.account.add_nullifier(*nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. - fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(delta_account_id) = impact.account_delta { - if delta_account_id == self.account_id { - self.account.commit_delta(); - } - } - - for nullifier in impact.nullifiers { - if self.known_nullifiers.remove(&nullifier) { - // Its possible for the account to no longer exist if the transaction creating it - // was reverted. - self.account.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. 
- fn revert_transaction(&mut self, tx: TransactionId) -> Option { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - tracing::debug!("transaction {tx} not found in inflight transactions"); - return None; - }; - - // Revert account creation. - if let Some(account_id) = impact.account_delta { - // Account creation reverted, actor must stop. - if account_id == self.account_id && self.account.revert_delta() { - return Some(ActorShutdownReason::AccountReverted(account_id)); - } - } - - // Revert notes. - for note_nullifier in impact.notes { - if self.known_nullifiers.contains(¬e_nullifier) { - self.account.revert_note(note_nullifier); - self.known_nullifiers.remove(¬e_nullifier); - } - } - - // Revert nullifiers. - for nullifier in impact.nullifiers { - if self.known_nullifiers.contains(&nullifier) { - self.account.revert_nullifier(nullifier); - self.known_nullifiers.remove(&nullifier); - } - } - - None - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.known_nullifiers.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Clone, Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. 
- nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} - -/// Filters network notes by account ID and maps them to single target network notes. -fn filter_by_account_id_and_map_to_single_target( - account_id: NetworkAccountId, - notes: Vec, -) -> Vec { - notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) if note.account_id() == account_id => Some(note), - NetworkNote::SingleTarget(_) => None, - }) - .collect::>() -} - -#[cfg(test)] -mod tests { - use std::collections::HashSet; - use std::sync::{Arc, Mutex}; - - use miden_protocol::account::{AccountBuilder, AccountStorageMode, AccountType}; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::crypto::rand::RpoRandomCoin; - use miden_protocol::note::{Note, NoteAttachment, NoteExecutionHint, NoteType}; - use miden_protocol::testing::account_id::AccountIdBuilder; - use miden_protocol::transaction::TransactionId; - use miden_protocol::{EMPTY_WORD, Felt, Hasher}; - use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; - - use super::*; - - // HELPERS - // ============================================================================================ - - /// Creates a network account for testing. - fn create_network_account(seed: u8) -> Account { - use miden_protocol::testing::noop_auth_component::NoopAuthComponent; - use miden_standards::account::wallets::BasicWallet; - - AccountBuilder::new([seed; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Network) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .expect("should be able to build test account") - } - - /// Creates a faucet account ID for testing. 
- fn create_faucet_id(seed: u8) -> miden_protocol::account::AccountId { - AccountIdBuilder::new() - .account_type(AccountType::FungibleFaucet) - .storage_mode(AccountStorageMode::Public) - .build_with_seed([seed; 32]) - } - - /// Creates a note targeted at the given network account. - fn create_network_note( - target_account_id: miden_protocol::account::AccountId, - seed: u8, - ) -> Note { - let coin_seed: [u64; 4] = - [u64::from(seed), u64::from(seed) + 1, u64::from(seed) + 2, u64::from(seed) + 3]; - let rng = Arc::new(Mutex::new(RpoRandomCoin::new(coin_seed.map(Felt::new).into()))); - let mut rng = rng.lock().unwrap(); - - let faucet_id = create_faucet_id(seed.wrapping_add(100)); - - let target = NetworkAccountTarget::new(target_account_id, NoteExecutionHint::Always) - .expect("NetworkAccountTarget creation should succeed for network account"); - let attachment: NoteAttachment = target.into(); - - create_p2id_note( - target_account_id, - target_account_id, - vec![Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap())], - NoteType::Public, - attachment, - &mut *rng, - ) - .expect("note creation should succeed") - } - - /// Creates a `SingleTargetNetworkNote` from a `Note`. - fn to_single_target_note(note: Note) -> SingleTargetNetworkNote { - SingleTargetNetworkNote::try_from(note).expect("should convert to SingleTargetNetworkNote") - } - - /// Creates a mock `TransactionId` for testing. - fn mock_tx_id(seed: u8) -> TransactionId { - TransactionId::new( - Hasher::hash(&[seed; 32]), - Hasher::hash(&[seed.wrapping_add(1); 32]), - EMPTY_WORD, - EMPTY_WORD, - ) - } - - /// Creates a mock `BlockHeader` for testing. 
- fn mock_block_header(block_num: u32) -> miden_protocol::block::BlockHeader { - use miden_node_utils::fee::test_fee_params; - use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; - - miden_protocol::block::BlockHeader::new( - 0, - EMPTY_WORD, - BlockNumber::from(block_num), - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - SecretKey::new().public_key(), - test_fee_params(), - 0, - ) - } - - impl NetworkAccountState { - /// Creates a new `NetworkAccountState` for testing. - /// - /// This mirrors the behavior of `load()` but with provided notes instead of - /// fetching from the store. - #[cfg(test)] - pub fn new_for_testing( - account: Account, - account_id: NetworkAccountId, - notes: Vec, - ) -> Self { - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account = NetworkAccountNoteState::new(account, notes); - - Self { - account, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - } - } - } - - // TESTS - // ============================================================================================ - - /// Tests that initial notes loaded into `NetworkAccountState` have their nullifiers - /// registered in `known_nullifiers`. 
- #[test] - fn test_initial_notes_have_nullifiers_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "known_nullifiers should contain first note's nullifier" - ); - assert!( - state.known_nullifiers.contains(&nullifier2), - "known_nullifiers should contain second note's nullifier" - ); - assert_eq!( - state.known_nullifiers.len(), - 2, - "known_nullifiers should have exactly 2 entries" - ); - } - - /// Tests that when a `TransactionAdded` event arrives with nullifiers from initial notes, - /// those notes are properly moved from `available_notes` to `nullified_notes`. 
- #[test] - fn test_mempool_event_nullifies_initial_notes() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 2, "both notes should be available initially"); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - - let shutdown = state.mempool_update(&event); - assert!(shutdown.is_none(), "mempool_update should not trigger shutdown"); - - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - !available_nullifiers.contains(&nullifier1), - "note1 should no longer be available" - ); - assert!(available_nullifiers.contains(&nullifier2), "note2 should still be available"); - assert_eq!(available_nullifiers.len(), 1, "only one note should be available"); - - assert!( - state.inflight_txs.contains_key(&tx_id), - "transaction should be tracked in inflight_txs" - ); - } - - /// Tests that after committing a transaction, the nullifier is removed from `known_nullifiers`. 
- #[test] - fn test_commit_removes_nullifier_from_index() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "nullifier should still be in index while transaction is inflight" - ); - - let commit_event = MempoolEvent::BlockCommitted { - header: Box::new(mock_block_header(1)), - txs: vec![tx_id], - }; - state.mempool_update(&commit_event); - - assert!( - !state.known_nullifiers.contains(&nullifier1), - "nullifier should be removed from index after commit" - ); - } - - /// Tests that reverting a transaction restores the note to `available_notes`. 
- #[test] - fn test_revert_restores_note_to_available() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - // Verify note is not available - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 0, "note should not be available after being consumed"); - - // Revert the transaction - let revert_event = - MempoolEvent::TransactionsReverted(HashSet::from_iter(std::iter::once(tx_id))); - state.mempool_update(&revert_event); - - // Verify note is available again - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&nullifier1), - "note should be available again after revert" - ); - } - - /// Tests that nullifiers from dynamically added notes are also indexed. 
- #[test] - fn test_dynamically_added_notes_are_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let mut state = NetworkAccountState::new_for_testing(account, network_account_id, vec![]); - - assert!(state.known_nullifiers.is_empty(), "known_nullifiers should be empty initially"); - - let new_note = to_single_target_note(create_network_note(account_id, 1)); - let new_nullifier = new_note.nullifier(); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![], - network_notes: vec![NetworkNote::SingleTarget(new_note)], - account_delta: None, - }; - - state.mempool_update(&event); - - // Verify the new note's nullifier is now indexed - assert!( - state.known_nullifiers.contains(&new_nullifier), - "dynamically added note's nullifier should be indexed" - ); - - // Verify the note is available - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&new_nullifier), - "dynamically added note should be available" - ); - } + /// Wrapped in `Arc` to avoid expensive clones when reading the chain state. 
+ pub chain_mmr: Arc, } diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index edcf58c07e..9b45a48c2c 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -11,6 +11,7 @@ use miden_protocol::account::{ AccountId, AccountStorageHeader, PartialAccount, + StorageMapKey, StorageMapWitness, StorageSlotName, StorageSlotType, @@ -31,7 +32,7 @@ use miden_protocol::transaction::{ TransactionInputs, }; use miden_protocol::vm::FutureMaybeSend; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; use miden_tx::utils::Serializable; use miden_tx::{ @@ -55,6 +56,7 @@ use tracing::{Instrument, instrument}; use crate::COMPONENT; use crate::actor::account_state::TransactionCandidate; use crate::block_producer::BlockProducerClient; +use crate::db::Db; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -77,13 +79,19 @@ pub enum NtxError { type NtxResult = Result; +/// The result of a successful transaction execution. +/// +/// Contains the transaction ID, any notes that failed during filtering, and note scripts fetched +/// from the remote store that should be persisted to the local DB cache. +pub type NtxExecutionResult = (TransactionId, Vec, Vec<(Word, NoteScript)>); + // NETWORK TRANSACTION CONTEXT // ================================================================================================ /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - /// TODO(sergerad): Remove block producer client when block proving moved to store. + /// Client for submitting proven transactions to the Block Producer. block_producer: BlockProducerClient, /// Client for validating transactions via the Validator. 
@@ -100,6 +108,9 @@ pub struct NtxContext { /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + + /// Local database for persistent note script caching. + db: Db, } impl NtxContext { @@ -110,6 +121,7 @@ impl NtxContext { prover: Option, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { Self { block_producer, @@ -117,6 +129,7 @@ impl NtxContext { prover, store, script_cache, + db, } } @@ -132,8 +145,9 @@ impl NtxContext { /// /// # Returns /// - /// On success, returns the [`TransactionId`] of the executed transaction and a list of - /// [`FailedNote`]s representing notes that were filtered out before execution. + /// On success, returns an [`NtxExecutionResult`] containing the transaction ID, any notes + /// that failed during filtering, and note scripts fetched from the remote store that should + /// be persisted to the local DB cache. /// /// # Errors /// @@ -146,7 +160,7 @@ impl NtxContext { pub fn execute_transaction( self, tx: TransactionCandidate, - ) -> impl FutureMaybeSend)>> { + ) -> impl FutureMaybeSend> { let TransactionCandidate { account, notes, @@ -168,6 +182,7 @@ impl NtxContext { chain_mmr, self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); // Filter notes. @@ -178,6 +193,9 @@ impl NtxContext { // Execute transaction. let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + // Collect scripts fetched from the remote store during execution. + let scripts_to_cache = data_store.take_fetched_scripts().await; + // Prove transaction. let tx_inputs: TransactionInputs = executed_tx.into(); let proven_tx = Box::pin(self.prove(&tx_inputs)).await?; @@ -188,7 +206,7 @@ impl NtxContext { // Submit transaction to block producer. 
self.submit(&proven_tx).await?; - Ok((proven_tx.id(), failed_notes)) + Ok((proven_tx.id(), failed_notes, scripts_to_cache)) }) .in_current_span() .await @@ -327,12 +345,18 @@ impl NtxContext { struct NtxDataStore { account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + /// The chain MMR, wrapped in `Arc` to avoid expensive clones when reading the chain state. + chain_mmr: Arc, mast_store: TransactionMastStore, /// Store client for retrieving note scripts. store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + /// Local database for persistent note script. + db: Db, + /// Scripts fetched from the remote store during execution, to be persisted by the + /// coordinator. + fetched_scripts: Arc>>, /// Mapping of storage map roots to storage slot names observed during various calls. /// /// The registered slot names are subsequently used to retrieve storage map witnesses from the @@ -362,9 +386,10 @@ impl NtxDataStore { fn new( account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + chain_mmr: Arc, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(account.code()); @@ -376,10 +401,17 @@ impl NtxDataStore { mast_store, store, script_cache, + db, + fetched_scripts: Arc::new(Mutex::new(Vec::new())), storage_slots: Arc::new(Mutex::new(BTreeMap::default())), } } + /// Returns the list of note scripts fetched from the remote store during execution. + async fn take_fetched_scripts(&self) -> Vec<(Word, NoteScript)> { + self.fetched_scripts.lock().await.drain(..).collect() + } + /// Registers storage map slot names for the given account ID and storage header. /// /// These slot names are subsequently used to query for storage map witnesses against the store. 
@@ -421,7 +453,7 @@ impl DataStore for NtxDataStore { .await; let partial_account = PartialAccount::from(&self.account); - Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) + Ok((partial_account, self.reference_block.clone(), (*self.chain_mmr).clone())) } } @@ -477,7 +509,7 @@ impl DataStore for NtxDataStore { &self, account_id: AccountId, map_root: Word, - map_key: Word, + map_key: StorageMapKey, ) -> impl FutureMaybeSend> { async move { // The slot name that corresponds to the given account ID and map root must have been @@ -506,28 +538,40 @@ impl DataStore for NtxDataStore { /// Retrieves a note script by its root hash. /// - /// This implementation uses the configured RPC client to call the `GetNoteScriptByRoot` - /// endpoint on the RPC server. + /// Uses a 3-tier lookup strategy: + /// 1. In-memory LRU cache. + /// 2. Local SQLite database. + /// 3. Remote store via gRPC. fn get_note_script( &self, script_root: Word, ) -> impl FutureMaybeSend, DataStoreError>> { async move { - // Attempt to retrieve the script from the cache. + // 1. In-memory LRU cache. if let Some(cached_script) = self.script_cache.get(&script_root).await { return Ok(Some(cached_script)); } - // Retrieve the script from the store. + // 2. Local DB. + if let Some(script) = self.db.lookup_note_script(script_root).await.map_err(|err| { + DataStoreError::other_with_source("failed to look up note script in local DB", err) + })? { + self.script_cache.put(script_root, script.clone()).await; + return Ok(Some(script)); + } + + // 3. Remote store. let maybe_script = self.store.get_note_script_by_root(script_root).await.map_err(|err| { - DataStoreError::Other { - error_msg: "failed to retrieve note script from store".to_string().into(), - source: Some(err.into()), - } + DataStoreError::other_with_source( + "failed to retrieve note script from store", + err, + ) })?; - // Handle response. 
+ if let Some(script) = maybe_script { + // Collect for later persistence by the coordinator. + self.fetched_scripts.lock().await.push((script_root, script.clone())); self.script_cache.put(script_root, script.clone()).await; Ok(Some(script)) } else { diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs index 23c7d06d72..401cc7d00a 100644 --- a/crates/ntx-builder/src/actor/inflight_note.rs +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -1,6 +1,6 @@ -use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::block::BlockNumber; -use miden_protocol::note::Note; +use miden_protocol::note::{Note, Nullifier}; +use miden_standards::note::AccountTargetNetworkNote; use crate::actor::has_backoff_passed; @@ -14,14 +14,14 @@ use crate::actor::has_backoff_passed; /// will likely be soon after the number that is recorded here. #[derive(Debug, Clone)] pub struct InflightNetworkNote { - note: SingleTargetNetworkNote, + note: AccountTargetNetworkNote, attempt_count: usize, last_attempt: Option, } impl InflightNetworkNote { /// Creates a new inflight network note. - pub fn new(note: SingleTargetNetworkNote) -> Self { + pub fn new(note: AccountTargetNetworkNote) -> Self { Self { note, attempt_count: 0, @@ -29,13 +29,22 @@ impl InflightNetworkNote { } } + /// Reconstructs an inflight network note from its constituent parts (e.g., from DB rows). + pub fn from_parts( + note: AccountTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, + ) -> Self { + Self { note, attempt_count, last_attempt } + } + /// Consumes the inflight network note and returns the inner network note. - pub fn into_inner(self) -> SingleTargetNetworkNote { + pub fn into_inner(self) -> AccountTargetNetworkNote { self.note } /// Returns a reference to the inner network note. 
- pub fn to_inner(&self) -> &SingleTargetNetworkNote { + pub fn to_inner(&self) -> &AccountTargetNetworkNote { &self.note } @@ -48,7 +57,7 @@ impl InflightNetworkNote { /// /// The note is available if the backoff period has passed. pub fn is_available(&self, block_num: BlockNumber) -> bool { - self.note.can_be_consumed(block_num).unwrap_or(true) + self.note.execution_hint().can_be_consumed(block_num).unwrap_or(true) && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) } @@ -57,10 +66,14 @@ impl InflightNetworkNote { self.last_attempt = Some(block_num); self.attempt_count += 1; } + + pub fn nullifier(&self) -> Nullifier { + self.note.as_note().nullifier() + } } impl From for Note { fn from(value: InflightNetworkNote) -> Self { - value.into_inner().into() + value.into_inner().into_note() } } diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ae8f63629e..4533b62595 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -1,12 +1,13 @@ +pub(crate) mod account_effect; pub mod account_state; mod execute; -mod inflight_note; -mod note_state; +pub(crate) mod inflight_note; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; -use account_state::{NetworkAccountState, TransactionCandidate}; +use account_state::TransactionCandidate; use futures::FutureExt; use miden_node_proto::clients::{Builder, ValidatorClient}; use miden_node_proto::domain::account::NetworkAccountId; @@ -16,24 +17,39 @@ use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::NoteScript; +use miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, 
RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; +use crate::actor::inflight_note::InflightNetworkNote; use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; +use crate::db::Db; use crate::store::StoreClient; +// ACTOR NOTIFICATION +// ================================================================================================ + +/// A notification sent from an account actor to the coordinator. +pub enum ActorNotification { + /// One or more notes failed during transaction execution and should have their attempt + /// counters incremented. + NotesFailed { + nullifiers: Vec, + block_num: BlockNumber, + }, + /// A note script was fetched from the remote store and should be persisted to the local DB. + CacheNoteScript { script_root: Word, script: NoteScript }, +} + // ACTOR SHUTDOWN REASON // ================================================================================================ /// The reason an actor has shut down. pub enum ActorShutdownReason { - /// Occurs when the transaction that created the actor is reverted. - AccountReverted(NetworkAccountId), /// Occurs when an account actor detects failure in the messaging channel used by the /// coordinator. EventChannelClosed, @@ -66,6 +82,14 @@ pub struct AccountActorContext { /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors to maximize cache efficiency. pub script_cache: LruCache, + /// Maximum number of notes per transaction. + pub max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + pub max_note_attempts: usize, + /// Database for persistent state. + pub db: Db, + /// Channel for sending notifications to the coordinator (via the builder event loop). 
+ pub notification_tx: mpsc::Sender, } // ACCOUNT ORIGIN @@ -127,10 +151,10 @@ enum ActorMode { /// /// ## Core Responsibilities /// -/// - **State Management**: Loads and maintains the current state of network accounts, including -/// available notes, pending transactions, and account commitments. +/// - **State Management**: Queries the database for the current state of network accounts, +/// including available notes and the latest account state. /// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] -/// based on current chain state. +/// based on current chain state and DB queries. /// - **Transaction Execution**: Executes selected transactions using either local or remote /// proving. /// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network @@ -138,11 +162,12 @@ enum ActorMode { /// /// ## Lifecycle /// -/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 1. **Initialization**: Checks DB for available notes to determine initial mode. /// 2. **Event Loop**: Continuously processes mempool events and executes transactions. /// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to /// block producer. -/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 4. **State Updates**: Event effects are persisted to DB by the coordinator before actors are +/// notified. /// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. /// /// ## Concurrency @@ -153,15 +178,21 @@ enum ActorMode { pub struct AccountActor { origin: AccountOrigin, store: StoreClient, + db: Db, mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - // TODO(sergerad): Remove block producer when block proving moved to store. 
block_producer: BlockProducerClient, validator: ValidatorClient, prover: Option, chain_state: Arc>, script_cache: LruCache, + /// Maximum number of notes per transaction. + max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + max_note_attempts: usize, + /// Channel for sending notifications to the coordinator. + notification_tx: mpsc::Sender, } impl AccountActor { @@ -185,6 +216,7 @@ impl AccountActor { Self { origin, store: actor_context.store.clone(), + db: actor_context.db.clone(), mode: ActorMode::NoViableNotes, event_rx, cancel_token, @@ -193,29 +225,28 @@ impl AccountActor { prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), + max_notes_per_tx: actor_context.max_notes_per_tx, + max_note_attempts: actor_context.max_note_attempts, + notification_tx: actor_context.notification_tx.clone(), } } /// Runs the account actor, processing events and managing state until a reason to shutdown is /// encountered. pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { - // Load the account state from the store and set up the account actor state. - let account = { - match self.origin { - AccountOrigin::Store(account_id) => self - .store - .get_network_account(account_id) - .await - .expect("actor should be able to load account") - .expect("actor account should exist"), - AccountOrigin::Transaction(ref account) => *(account.clone()), - } - }; + let account_id = self.origin.id(); + + // Determine initial mode by checking DB for available notes. 
let block_num = self.chain_state.read().await.chain_tip_header.block_num(); - let mut state = - NetworkAccountState::load(account, self.origin.id(), &self.store, block_num) - .await - .expect("actor should be able to load account state"); + let has_notes = self + .db + .has_available_notes(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to check for available notes"); + + if has_notes { + self.mode = ActorMode::NotesAvailable; + } loop { // Enable or disable transaction execution based on actor mode. @@ -229,28 +260,31 @@ impl AccountActor { }; tokio::select! { _ = self.cancel_token.cancelled() => { - return ActorShutdownReason::Cancelled(self.origin.id()); + return ActorShutdownReason::Cancelled(account_id); } // Handle mempool events. event = self.event_rx.recv() => { let Some(event) = event else { return ActorShutdownReason::EventChannelClosed; }; - // Re-enable transaction execution if the transaction being waited on has been - // added to the mempool. + // Re-enable transaction execution if the transaction being waited on has + // been resolved (added to mempool, committed in a block, or reverted). if let ActorMode::TransactionInflight(awaited_id) = self.mode { - if let MempoolEvent::TransactionAdded { id, .. } = *event { - if id == awaited_id { - self.mode = ActorMode::NotesAvailable; - } + let should_wake = match event.as_ref() { + MempoolEvent::TransactionAdded { id, .. } => *id == awaited_id, + MempoolEvent::BlockCommitted { txs, .. } => { + txs.contains(&awaited_id) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + tx_ids.contains(&awaited_id) + }, + }; + if should_wake { + self.mode = ActorMode::NotesAvailable; } } else { self.mode = ActorMode::NotesAvailable; } - // Update state. - if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { - return shutdown_reason; - } }, // Execute transactions. 
permit = tx_permit_acquisition => { @@ -258,9 +292,15 @@ impl AccountActor { Ok(_permit) => { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); - // Find a candidate transaction and execute it. - if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { - self.execute_transactions(&mut state, tx_candidate).await; + + // Query DB for latest account and available notes. + let tx_candidate = self.select_candidate_from_db( + account_id, + chain_state, + ).await; + + if let Some(tx_candidate) = tx_candidate { + self.execute_transactions(account_id, tx_candidate).await; } else { // No transactions to execute, wait for events. self.mode = ActorMode::NoViableNotes; @@ -275,13 +315,44 @@ impl AccountActor { } } + /// Selects a transaction candidate by querying the DB. + async fn select_candidate_from_db( + &self, + account_id: NetworkAccountId, + chain_state: ChainState, + ) -> Option { + let block_num = chain_state.chain_tip_header.block_num(); + let max_notes = self.max_notes_per_tx.get(); + + let (latest_account, notes) = self + .db + .select_candidate(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to query DB for candidate"); + + let account = latest_account?; + + let notes: Vec<_> = notes.into_iter().take(max_notes).collect(); + if notes.is_empty() { + return None; + } + + let (chain_tip_header, chain_mmr) = chain_state.into_parts(); + Some(TransactionCandidate { + account, + notes, + chain_tip_header, + chain_mmr, + }) + } + /// Execute a transaction candidate and mark notes as failed as required. /// /// Updates the state of the actor based on the execution result. 
- #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, tx_candidate))] async fn execute_transactions( &mut self, - state: &mut NetworkAccountState, + account_id: NetworkAccountId, tx_candidate: TransactionCandidate, ) { let block_num = tx_candidate.chain_tip_header.block_num(); @@ -293,31 +364,55 @@ impl AccountActor { self.prover.clone(), self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); let notes = tx_candidate.notes.clone(); let execution_result = context.execute_transaction(tx_candidate).await; match execution_result { // Execution completed without failed notes. - Ok((tx_id, failed)) if failed.is_empty() => { + Ok((tx_id, failed, scripts_to_cache)) if failed.is_empty() => { + self.cache_note_scripts(scripts_to_cache).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Execution completed with some failed notes. - Ok((tx_id, failed)) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + Ok((tx_id, failed, scripts_to_cache)) => { + self.cache_note_scripts(scripts_to_cache).await; + let nullifiers: Vec<_> = + failed.into_iter().map(|note| note.note.nullifier()).collect(); + self.mark_notes_failed(&nullifiers, block_num).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Transaction execution failed. Err(err) => { tracing::error!(err = err.as_report(), "network transaction failed"); self.mode = ActorMode::NoViableNotes; - let notes = - notes.into_iter().map(|note| note.into_inner().into()).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + let nullifiers: Vec<_> = notes.iter().map(InflightNetworkNote::nullifier).collect(); + self.mark_notes_failed(&nullifiers, block_num).await; }, } } + + /// Sends notifications to the coordinator to cache note scripts fetched from the remote store. 
+ async fn cache_note_scripts(&self, scripts: Vec<(Word, NoteScript)>) { + for (script_root, script) in scripts { + let _ = self + .notification_tx + .send(ActorNotification::CacheNoteScript { script_root, script }) + .await; + } + } + + /// Sends a notification to the coordinator to mark notes as failed. + async fn mark_notes_failed(&self, nullifiers: &[Nullifier], block_num: BlockNumber) { + let _ = self + .notification_tx + .send(ActorNotification::NotesFailed { + nullifiers: nullifiers.to_vec(), + block_num, + }) + .await; + } } // HELPERS @@ -334,7 +429,7 @@ impl AccountActor { /// - After 10 attempts, the backoff period is 12 blocks. /// - After 20 attempts, the backoff period is 148 blocks. /// - etc... -#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)] fn has_backoff_passed( chain_tip: BlockNumber, last_attempt: Option, @@ -354,3 +449,34 @@ fn has_backoff_passed( // Check if the backoff period has passed. 
blocks_passed.as_usize() > backoff_threshold } + +#[cfg(test)] +mod tests { + use miden_protocol::block::BlockNumber; + + use super::has_backoff_passed; + + #[rstest::rstest] + #[test] + #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] + #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] + #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] + #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] + #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] + #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] + #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] + #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] + #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] + #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] + fn backoff_has_passed( + #[case] last_attempt_block_num: Option, + #[case] current_block_num: BlockNumber, + #[case] attempt_count: usize, + #[case] backoff_should_have_passed: bool, + ) { + assert_eq!( + backoff_should_have_passed, + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + ); + } +} diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs deleted file mode 100644 index 87b91fc21a..0000000000 --- a/crates/ntx-builder/src/actor/note_state.rs +++ /dev/null @@ -1,258 +0,0 @@ -use std::collections::{HashMap, VecDeque}; - -use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::account::{Account, AccountDelta, AccountId}; -use miden_protocol::block::BlockNumber; -use miden_protocol::note::Nullifier; - -use 
crate::actor::inflight_note::InflightNetworkNote; - -// ACCOUNT STATE -// ================================================================================================ - -/// Tracks the state of a network account and its notes. -#[derive(Clone)] -pub struct NetworkAccountNoteState { - /// The committed account state, if any. - /// - /// Its possible this is `None` if the account creation transaction is still inflight. - committed: Option, - - /// Inflight account updates in chronological order. - inflight: VecDeque, - - /// Unconsumed notes of this account. - available_notes: HashMap, - - /// Notes which have been consumed by transactions that are still inflight. - nullified_notes: HashMap, -} - -impl NetworkAccountNoteState { - /// Creates a new account state from the supplied account and notes. - pub fn new(account: Account, notes: Vec) -> Self { - let account_id = NetworkAccountId::try_from(account.id()) - .expect("only network accounts are used for account state"); - - let mut state = Self { - committed: Some(account), - inflight: VecDeque::default(), - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), - }; - - for note in notes { - // Currently only support single target network notes in NTB. - assert!( - note.account_id() == account_id, - "Notes supplied into account state must match expected account ID" - ); - state.add_note(note); - } - - state - } - - /// Returns an iterator over inflight notes that are not currently within their respective - /// backoff periods based on block number. - pub fn available_notes( - &self, - block_num: &BlockNumber, - ) -> impl Iterator { - self.available_notes.values().filter(|¬e| note.is_available(*block_num)) - } - - /// Appends a delta to the set of inflight account updates. 
- pub fn add_delta(&mut self, delta: &AccountDelta) { - let mut state = self.latest_account(); - state - .apply_delta(delta) - .expect("network account delta should apply since it was accepted by the mempool"); - - self.inflight.push_back(state); - } - - /// Commits the oldest account state delta. - /// - /// # Panics - /// - /// Panics if there are no deltas to commit. - pub fn commit_delta(&mut self) { - self.committed = self.inflight.pop_front().expect("must have a delta to commit").into(); - } - - /// Reverts the newest account state delta. - /// - /// # Returns - /// - /// Returns `true` if this reverted the account creation delta. The caller _must_ remove this - /// account and associated notes as calls to `account` will panic. - /// - /// # Panics - /// - /// Panics if there are no deltas to revert. - #[must_use = "must remove this account and its notes"] - pub fn revert_delta(&mut self) -> bool { - self.inflight.pop_back().expect("must have a delta to revert"); - self.committed.is_none() && self.inflight.is_empty() - } - - /// Adds a new network note making it available for consumption. - pub fn add_note(&mut self, note: SingleTargetNetworkNote) { - self.available_notes.insert(note.nullifier(), InflightNetworkNote::new(note)); - } - - /// Removes the note completely. - pub fn revert_note(&mut self, note: Nullifier) { - // Transactions can be reverted out of order. - // - // This means the tx which nullified the note might not have been reverted yet, and the note - // might still be in the nullified - self.available_notes.remove(¬e); - self.nullified_notes.remove(¬e); - } - - /// Marks a note as being consumed. - /// - /// The note data is retained until the nullifier is committed. - /// - /// Returns `Err(())` if the note does not exist or was already nullified. 
- pub fn add_nullifier(&mut self, nullifier: Nullifier) -> Result<(), ()> { - if let Some(note) = self.available_notes.remove(&nullifier) { - self.nullified_notes.insert(nullifier, note); - Ok(()) - } else { - tracing::warn!(%nullifier, "note must be available to nullify"); - Err(()) - } - } - - /// Marks a nullifier as being committed, removing the associated note data entirely. - /// - /// Silently ignores the request if the nullifier is not present, which can happen - /// if the note's transaction wasn't available when the nullifier was added. - pub fn commit_nullifier(&mut self, nullifier: Nullifier) { - // we might not have this if we didn't add it with `add_nullifier` - // in case it's transaction wasn't available in the first place. - // It shouldn't happen practically, since we skip them if the - // relevant account cannot be retrieved via `fetch`. - - let _ = self.nullified_notes.remove(&nullifier); - } - - /// Reverts a nullifier, marking the associated note as available again. - pub fn revert_nullifier(&mut self, nullifier: Nullifier) { - // Transactions can be reverted out of order. - // - // The note may already have been fully removed by `revert_note` if the transaction creating - // the note was reverted before the transaction that consumed it. - if let Some(note) = self.nullified_notes.remove(&nullifier) { - self.available_notes.insert(nullifier, note); - } - } - - /// Drops all notes that have failed to be consumed after a certain number of attempts. - pub fn drop_failing_notes(&mut self, max_attempts: usize) { - self.available_notes.retain(|_, note| note.attempt_count() < max_attempts); - } - - /// Returns the latest inflight account state. - pub fn latest_account(&self) -> Account { - self.inflight - .back() - .or(self.committed.as_ref()) - .expect("account must have either a committed or inflight state") - .clone() - } - - /// Returns `true` if there is no inflight state being tracked. 
- /// - /// This implies this state is safe to remove without losing uncommitted data. - pub fn is_empty(&self) -> bool { - self.inflight.is_empty() - && self.available_notes.is_empty() - && self.nullified_notes.is_empty() - } - - /// Marks the specified notes as failed. - pub fn fail_notes(&mut self, nullifiers: &[Nullifier], block_num: BlockNumber) { - for nullifier in nullifiers { - if let Some(note) = self.available_notes.get_mut(nullifier) { - note.fail(block_num); - } else { - tracing::warn!(%nullifier, "failed note is not in account's state"); - } - } - } -} - -// NETWORK ACCOUNT UPDATE -// ================================================================================================ - -#[derive(Clone)] -pub enum NetworkAccountEffect { - Created(Account), - Updated(AccountDelta), -} - -impl NetworkAccountEffect { - pub fn from_protocol(update: &AccountUpdateDetails) -> Option { - let update = match update { - AccountUpdateDetails::Private => return None, - AccountUpdateDetails::Delta(update) if update.is_full_state() => { - NetworkAccountEffect::Created( - Account::try_from(update) - .expect("Account should be derivable by full state AccountDelta"), - ) - }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), - }; - - update.protocol_account_id().is_network().then_some(update) - } - - pub fn network_account_id(&self) -> NetworkAccountId { - // SAFETY: This is a network account by construction. 
- self.protocol_account_id().try_into().unwrap() - } - - fn protocol_account_id(&self) -> AccountId { - match self { - NetworkAccountEffect::Created(acc) => acc.id(), - NetworkAccountEffect::Updated(delta) => delta.id(), - } - } -} - -#[cfg(test)] -mod tests { - use miden_protocol::block::BlockNumber; - - #[rstest::rstest] - #[test] - #[case::all_zero(Some(BlockNumber::from(0)), BlockNumber::from(0), 0, true)] - #[case::no_attempts(None, BlockNumber::from(0), 0, true)] - #[case::one_attempt(Some(BlockNumber::from(0)), BlockNumber::from(2), 1, true)] - #[case::three_attempts(Some(BlockNumber::from(0)), BlockNumber::from(3), 3, true)] - #[case::ten_attempts(Some(BlockNumber::from(0)), BlockNumber::from(13), 10, true)] - #[case::twenty_attempts(Some(BlockNumber::from(0)), BlockNumber::from(149), 20, true)] - #[case::one_attempt_false(Some(BlockNumber::from(0)), BlockNumber::from(1), 1, false)] - #[case::three_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(2), 3, false)] - #[case::ten_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(12), 10, false)] - #[case::twenty_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(148), 20, false)] - fn backoff_has_passed( - #[case] last_attempt_block_num: Option, - #[case] current_block_num: BlockNumber, - #[case] attempt_count: usize, - #[case] backoff_should_have_passed: bool, - ) { - use crate::actor::has_backoff_passed; - - assert_eq!( - backoff_should_have_passed, - has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) - ); - } -} diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index ce4d7b9c6a..53925bdcf8 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -62,7 +62,7 @@ impl BlockProducerClient { pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let mut retry_counter = 0; loop 
{ match self.subscribe_to_mempool(chain_tip).await { @@ -90,7 +90,7 @@ impl BlockProducerClient { async fn subscribe_to_mempool( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let request = proto::block_producer::MempoolSubscriptionRequest { chain_tip: chain_tip.as_u32() }; let stream = self.client.clone().mempool_subscription(request).await?; diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 8b789779f7..adaee152a9 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -1,56 +1,64 @@ -use std::num::NonZeroUsize; +use std::pin::Pin; use std::sync::Arc; use anyhow::Context; -use futures::TryStreamExt; +use futures::Stream; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_utils::lru_cache::LruCache; -use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::BlockHeader; use miden_protocol::crypto::merkle::mmr::PartialMmr; -use miden_protocol::note::NoteScript; use miden_protocol::transaction::PartialBlockchain; use tokio::sync::{RwLock, mpsc}; -use url::Url; +use tokio_stream::StreamExt; +use tonic::Status; -use crate::MAX_IN_PROGRESS_TXS; -use crate::actor::{AccountActorContext, AccountOrigin}; -use crate::block_producer::BlockProducerClient; +use crate::NtxBuilderConfig; +use crate::actor::{AccountActorContext, AccountOrigin, ActorNotification}; use crate::coordinator::Coordinator; +use crate::db::Db; use crate::store::StoreClient; -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. 
-const MAX_BLOCK_COUNT: usize = 4; - // CHAIN STATE // ================================================================================================ /// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and -/// all account actors managed by the [`Coordinator`] +/// all account actors managed by the [`Coordinator`]. +/// +/// The chain MMR stored here contains: +/// - The MMR peaks. +/// - Block headers and authentication paths for the last [`NtxBuilderConfig::max_block_count`] +/// blocks. +/// +/// Authentication paths for older blocks are pruned because the NTX builder executes all notes as +/// "unauthenticated" (see [`InputNotes::from_unauthenticated_notes`]) and therefore does not need +/// to prove that input notes were created in specific past blocks. #[derive(Debug, Clone)] pub struct ChainState { /// The current tip of the chain. pub chain_tip_header: BlockHeader, - /// A partial representation of the latest state of the chain. - pub chain_mmr: PartialBlockchain, + /// A partial representation of the chain MMR. + /// + /// Contains block headers and authentication paths for the last + /// [`NtxBuilderConfig::max_block_count`] blocks only, since all notes are executed as + /// unauthenticated. + pub chain_mmr: Arc, } impl ChainState { /// Constructs a new instance of [`ChainState`]. - fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + pub(crate) fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { let chain_mmr = PartialBlockchain::new(chain_mmr, []) .expect("partial blockchain should build from partial mmr"); - Self { chain_tip_header, chain_mmr } + Self { + chain_tip_header, + chain_mmr: Arc::new(chain_mmr), + } } /// Consumes the chain state and returns the chain tip header and the partial blockchain as a /// tuple. 
- pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + pub fn into_parts(self) -> (BlockHeader, Arc) { (self.chain_tip_header, self.chain_mmr) } } @@ -58,103 +66,84 @@ impl ChainState { // NETWORK TRANSACTION BUILDER // ================================================================================================ +/// A boxed, pinned stream of mempool events with a `'static` lifetime. +/// +/// Boxing gives the stream a `'static` lifetime by ensuring it owns all its data, avoiding +/// complex lifetime annotations that would otherwise be required when storing `impl TryStream`. +pub(crate) type MempoolEventStream = + Pin> + Send>>; + /// Network transaction builder component. /// -/// The network transaction builder is in in charge of building transactions that consume notes +/// The network transaction builder is in charge of building transactions that consume notes /// against network accounts. These notes are identified and communicated by the block producer. /// The service maintains a list of unconsumed notes and periodically executes and proves /// transactions that consume them (reaching out to the store to retrieve state as necessary). /// /// The builder manages the tasks for every network account on the chain through the coordinator. +/// +/// Create an instance using [`NtxBuilderConfig::build()`]. pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the Validator server. - validator_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the performance impact. - tx_prover_url: Option, - /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. - /// This cache is shared across all account actors. - script_cache: LruCache, + /// Configuration for the builder. 
+ config: NtxBuilderConfig, /// Coordinator for managing actor tasks. coordinator: Coordinator, + /// Client for the store gRPC API. + store: StoreClient, + /// Database for persistent state. + db: Db, + /// Shared chain state updated by the event loop and read by actors. + chain_state: Arc>, + /// Context shared with all account actors. + actor_context: AccountActorContext, + /// Stream of mempool events from the block producer. + mempool_events: MempoolEventStream, + /// Receiver for notifications from account actors (e.g., note failures). + notification_rx: mpsc::Receiver, } impl NetworkTransactionBuilder { - /// Channel capacity for account loading. - const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; - - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - validator_url: Url, - tx_prover_url: Option, - script_cache_size: NonZeroUsize, + #[expect(clippy::too_many_arguments)] + pub(crate) fn new( + config: NtxBuilderConfig, + coordinator: Coordinator, + store: StoreClient, + db: Db, + chain_state: Arc>, + actor_context: AccountActorContext, + mempool_events: MempoolEventStream, + notification_rx: mpsc::Receiver, ) -> Self { - let script_cache = LruCache::new(script_cache_size); - let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); Self { - store_url, - block_producer_url, - validator_url, - tx_prover_url, - script_cache, + config, coordinator, + store, + db, + chain_state, + actor_context, + mempool_events, + notification_rx, } } - /// Runs the network transaction builder until a fatal error occurs. + /// Runs the network transaction builder event loop until a fatal error occurs. + /// + /// This method: + /// 1. Spawns a background task to load existing network accounts from the store + /// 2. 
Runs the main event loop, processing mempool events and managing actors + /// + /// # Errors + /// + /// Returns an error if: + /// - The mempool event stream ends unexpectedly + /// - An actor encounters a fatal error + /// - The account loader task fails pub async fn run(mut self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url.clone()); - let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); - - // Loop until we successfully subscribe. - // - // The mempool rejects our subscription if we don't have the same view of the chain aka - // if our chain tip does not match the mempools. This can occur if a new block is committed - // _after_ we fetch the chain tip from the store but _before_ our subscription request is - // handled. - // - // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. - let (chain_tip_header, chain_mmr, mut mempool_events) = loop { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - - match block_producer - .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) - .await - { - Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), - Err(status) if status.code() == tonic::Code::InvalidArgument => { - tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); - }, - Err(err) => return Err(err).context("failed to subscribe to mempool events"), - } - }; - - // Create chain state that will be updated by the coordinator and read by actors. 
- let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); - - let actor_context = AccountActorContext { - block_producer_url: self.block_producer_url.clone(), - validator_url: self.validator_url.clone(), - tx_prover_url: self.tx_prover_url.clone(), - chain_state: chain_state.clone(), - store: store.clone(), - script_cache: self.script_cache.clone(), - }; - // Spawn a background task to load network accounts from the store. - // Accounts are sent through a channel in batches and processed in the main event loop. + // Accounts are sent through a channel and processed in the main event loop. let (account_tx, mut account_rx) = - mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); - let account_loader_store = store.clone(); + mpsc::channel::(self.config.account_channel_capacity); + let account_loader_store = self.store.clone(); let mut account_loader_handle = tokio::spawn(async move { account_loader_store .stream_network_account_ids(account_tx) @@ -162,7 +151,7 @@ impl NetworkTransactionBuilder { .context("failed to load network accounts from store") }); - // Main loop which manages actors and passes mempool events to them. + // Main event loop. loop { tokio::select! { // Handle actor result. @@ -170,22 +159,22 @@ impl NetworkTransactionBuilder { result?; }, // Handle mempool events. - event = mempool_events.try_next() => { + event = self.mempool_events.next() => { let event = event .context("mempool event stream ended")? .context("mempool event stream failed")?; - self.handle_mempool_event( - event.into(), - &actor_context, - chain_state.clone(), - ).await?; + self.handle_mempool_event(event.into()).await?; }, // Handle account batches loaded from the store. // Once all accounts are loaded, the channel closes and this branch // becomes inactive (recv returns None and we stop matching). 
Some(account_id) = account_rx.recv() => { - self.handle_loaded_account(account_id, &actor_context).await?; + self.handle_loaded_account(account_id).await?; + }, + // Handle actor notifications (DB writes delegated from actors). + Some(notification) = self.notification_rx.recv() => { + self.handle_actor_notification(notification).await; }, // Handle account loader task completion/failure. // If the task fails, we abort since the builder would be in a degraded state @@ -202,44 +191,63 @@ impl NetworkTransactionBuilder { } } - /// Handles a batch of account IDs loaded from the store by spawning actors for them. - #[tracing::instrument( - name = "ntx.builder.handle_loaded_accounts", - skip(self, account_id, actor_context) - )] + /// Handles account IDs loaded from the store by syncing state to DB and spawning actors. + #[tracing::instrument(name = "ntx.builder.handle_loaded_account", skip(self, account_id))] async fn handle_loaded_account( &mut self, account_id: NetworkAccountId, - actor_context: &AccountActorContext, ) -> Result<(), anyhow::Error> { + // Fetch account from store and write to DB. + let account = self + .store + .get_network_account(account_id) + .await + .context("failed to load account from store")? + .context("account should exist in store")?; + + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let notes = self + .store + .get_unconsumed_network_notes(account_id, block_num.as_u32()) + .await + .context("failed to load notes from store")?; + + // Write account and notes to DB. + self.db + .sync_account_from_store(account_id, account.clone(), notes.clone()) + .await + .context("failed to sync account to DB")?; + self.coordinator - .spawn_actor(AccountOrigin::store(account_id), actor_context) + .spawn_actor(AccountOrigin::store(account_id), &self.actor_context) .await?; Ok(()) } - /// Handles mempool events by sending them to actors via the coordinator and/or spawning new - /// actors as required. 
- #[tracing::instrument( - name = "ntx.builder.handle_mempool_event", - skip(self, event, actor_context, chain_state) - )] + /// Handles mempool events by writing to DB first, then routing to actors. + #[tracing::instrument(name = "ntx.builder.handle_mempool_event", skip(self, event))] async fn handle_mempool_event( &mut self, event: Arc, - actor_context: &AccountActorContext, - chain_state: Arc>, ) -> Result<(), anyhow::Error> { match event.as_ref() { MempoolEvent::TransactionAdded { account_delta, .. } => { + // Write event effects to DB first. + self.coordinator + .write_event(&event) + .await + .context("failed to write TransactionAdded to DB")?; + // Handle account deltas in case an account is being created. if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { // Handle account deltas for network accounts only. if let Some(network_account) = AccountOrigin::transaction(delta) { - // Spawn new actors if a transaction creates a new network account + // Spawn new actors if a transaction creates a new network account. let is_creating_account = delta.is_full_state(); if is_creating_account { - self.coordinator.spawn_actor(network_account, actor_context).await?; + self.coordinator + .spawn_actor(network_account, &self.actor_context) + .await?; } } } @@ -247,48 +255,70 @@ impl NetworkTransactionBuilder { Ok(()) }, // Update chain state and broadcast. - MempoolEvent::BlockCommitted { header, txs } => { - self.update_chain_tip(header.as_ref().clone(), chain_state).await; - self.coordinator.broadcast(event.clone()).await; + MempoolEvent::BlockCommitted { header, .. } => { + // Write event effects to DB first. + self.coordinator + .write_event(&event) + .await + .context("failed to write BlockCommitted to DB")?; - // All transactions pertaining to predating events should now be available through - // the store. So we can now drain them. 
- for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); - } + self.update_chain_tip(header.as_ref().clone()).await; + self.coordinator.broadcast(event.clone()).await; Ok(()) }, // Broadcast to all actors. - MempoolEvent::TransactionsReverted(txs) => { + MempoolEvent::TransactionsReverted(_) => { + // Write event effects to DB first; returns reverted account IDs. + let reverted_accounts = self + .coordinator + .write_event(&event) + .await + .context("failed to write TransactionsReverted to DB")?; + self.coordinator.broadcast(event.clone()).await; - // Reverted predating transactions need not be processed. - for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); + // Cancel actors for reverted account creations. + for account_id in &reverted_accounts { + self.coordinator.cancel_actor(account_id); } Ok(()) }, } } - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { - // Lock the chain state. - let mut chain_state = chain_state.write().await; + /// Processes a notification from an account actor by performing the corresponding DB write. + async fn handle_actor_notification(&mut self, notification: ActorNotification) { + match notification { + ActorNotification::NotesFailed { nullifiers, block_num } => { + if let Err(err) = self.db.notes_failed(nullifiers, block_num).await { + tracing::error!(err = %err, "failed to mark notes as failed"); + } + }, + ActorNotification::CacheNoteScript { script_root, script } => { + if let Err(err) = self.db.insert_note_script(script_root, &script).await { + tracing::error!(err = %err, "failed to cache note script"); + } + }, + } + } + + /// Updates the chain tip and prunes old blocks from the MMR. + async fn update_chain_tip(&mut self, tip: BlockHeader) { + let mut chain_state = self.chain_state.write().await; // Update MMR which lags by one block. 
let mmr_tip = chain_state.chain_tip_header.clone(); - chain_state.chain_mmr.add_block(&mmr_tip, true); + Arc::make_mut(&mut chain_state.chain_mmr).add_block(&mmr_tip, true); // Set the new tip. chain_state.chain_tip_header = tip; // Keep MMR pruned. - let pruned_block_height = - (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) - as u32; - chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + let pruned_block_height = (chain_state + .chain_mmr + .chain_length() + .as_usize() + .saturating_sub(self.config.max_block_count)) as u32; + Arc::make_mut(&mut chain_state.chain_mmr).prune_to(..pruned_block_height.into()); } } diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 285cee47af..af2b840e44 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -2,18 +2,17 @@ use std::collections::HashMap; use std::sync::Arc; use anyhow::Context; -use indexmap::IndexMap; +use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::NetworkNote; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::transaction::TransactionId; use tokio::sync::mpsc::error::SendError; use tokio::sync::{Semaphore, mpsc}; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, ActorShutdownReason}; +use crate::db::Db; // ACTOR HANDLE // ================================================================================================ @@ -87,23 +86,23 @@ pub struct Coordinator { /// ensuring fair resource allocation and system stability under load. semaphore: Arc, - /// Cache of events received from the mempool that predate corresponding network accounts. - /// Grouped by network account ID to allow targeted event delivery to actors upon creation. 
- predating_events: HashMap>>, + /// Database for persistent state. + db: Db, + + /// Channel size for each actor's event channel. + actor_channel_size: usize, } impl Coordinator { - /// Maximum number of messages of the message channel for each actor. - const ACTOR_CHANNEL_SIZE: usize = 100; - /// Creates a new coordinator with the specified maximum number of inflight transactions - /// and shared script cache. - pub fn new(max_inflight_transactions: usize) -> Self { + /// and actor channel size. + pub fn new(max_inflight_transactions: usize, actor_channel_size: usize, db: Db) -> Self { Self { actor_registry: HashMap::new(), actor_join_set: JoinSet::new(), semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), - predating_events: HashMap::new(), + db, + actor_channel_size, } } @@ -122,28 +121,24 @@ impl Coordinator { // If an actor already exists for this account ID, something has gone wrong. if let Some(handle) = self.actor_registry.remove(&account_id) { - tracing::error!("account actor already exists for account: {}", account_id); + tracing::error!( + account_id = %account_id, + "Account actor already exists" + ); handle.cancel_token.cancel(); } - let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let (event_tx, event_rx) = mpsc::channel(self.actor_channel_size); let cancel_token = tokio_util::sync::CancellationToken::new(); let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); let handle = ActorHandle::new(event_tx, cancel_token); - // Run the actor. + // Run the actor. Actor reads state from DB on startup. let semaphore = self.semaphore.clone(); self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); - // Send the new actor any events that contain notes that predate account creation. 
- if let Some(predating_events) = self.predating_events.remove(&account_id) { - for event in predating_events.values() { - Self::send(&handle, event.clone()).await?; - } - } - self.actor_registry.insert(account_id, handle); - tracing::info!("created actor for account: {}", account_id); + tracing::info!(account_id = %account_id, "Created actor for account prefix"); Ok(()) } @@ -154,18 +149,21 @@ impl Coordinator { /// message channel and can process it accordingly. /// /// If an actor fails to receive the event, it will be canceled. + #[tracing::instrument(name = "ntx.coordinator.broadcast", skip_all, fields( + actor.count = self.actor_registry.len(), + event.kind = %event.kind() + ))] pub async fn broadcast(&mut self, event: Arc) { - tracing::debug!( - actor_count = self.actor_registry.len(), - "broadcasting event to all actors" - ); - let mut failed_actors = Vec::new(); // Send event to all actors. for (account_id, handle) in &self.actor_registry { if let Err(err) = Self::send(handle, event.clone()).await { - tracing::error!("failed to send event to actor {}: {}", account_id, err); + tracing::error!( + account_id = %account_id, + error = %err, + "Failed to send event to actor" + ); failed_actors.push(*account_id); } } @@ -192,12 +190,7 @@ impl Coordinator { ActorShutdownReason::Cancelled(account_id) => { // Do not remove the actor from the registry, as it may be re-spawned. // The coordinator should always remove actors immediately after cancellation. - tracing::info!("account actor cancelled: {}", account_id); - Ok(()) - }, - ActorShutdownReason::AccountReverted(account_id) => { - tracing::info!("account reverted: {}", account_id); - self.actor_registry.remove(&account_id); + tracing::info!(account_id = %account_id, "Account actor cancelled"); Ok(()) }, ActorShutdownReason::EventChannelClosed => { @@ -219,19 +212,15 @@ impl Coordinator { /// Sends a mempool event to all network account actors that are found in the corresponding /// transaction's notes. 
/// - /// Caches the mempool event for each network account found in the transaction's notes that does - /// not currently have a corresponding actor. If an actor does not exist for the account, it is - /// assumed that the account has not been created on the chain yet. - /// - /// Cached events will be fed to the corresponding actor when the account creation transaction - /// is processed. + /// Events are sent only to actors that are currently active. Since event effects are already + /// persisted in the DB by `write_event()`, actors that spawn later read their state from the + /// DB and do not need predating events. pub async fn send_targeted( &mut self, event: &Arc, ) -> Result<(), SendError>> { let mut target_actors = HashMap::new(); - if let MempoolEvent::TransactionAdded { id, network_notes, account_delta, .. } = - event.as_ref() + if let MempoolEvent::TransactionAdded { network_notes, account_delta, .. } = event.as_ref() { // We need to inform the account if it was updated. This lets it know that its own // transaction has been applied, and in the future also resolves race conditions with @@ -249,17 +238,12 @@ impl Coordinator { // Determine target actors for each note. for note in network_notes { - let NetworkNote::SingleTarget(note) = note; - let network_account_id = note.account_id(); - if let Some(actor) = self.actor_registry.get(&network_account_id) { - // Register actor as target. - target_actors.insert(network_account_id, actor); - } else { - // Cache event for every note that doesn't have a corresponding actor. 
- self.predating_events - .entry(network_account_id) - .or_default() - .insert(*id, event.clone()); + let account = note.target_account_id(); + let account = NetworkAccountId::try_from(account) + .expect("network note target account should be a network account"); + + if let Some(actor) = self.actor_registry.get(&account) { + target_actors.insert(account, actor); } } } @@ -270,16 +254,52 @@ impl Coordinator { Ok(()) } - /// Removes any cached events for a given transaction ID from all account caches. - pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { - // Remove the transaction from all account caches. - // This iterates over all predating events which is fine because the count is expected to be - // low. - self.predating_events.retain(|_, account_events| { - account_events.shift_remove(tx_id); - // Remove entries for accounts with no more cached events. - !account_events.is_empty() - }); + /// Writes mempool event effects to the database. + /// + /// This must be called BEFORE sending notifications to actors. For `TransactionsReverted`, + /// returns the list of account IDs whose creation was reverted. + pub async fn write_event( + &self, + event: &MempoolEvent, + ) -> Result, DatabaseError> { + match event { + MempoolEvent::TransactionAdded { + id, + nullifiers, + network_notes, + account_delta, + } => { + self.db + .handle_transaction_added( + *id, + account_delta.clone(), + network_notes.clone(), + nullifiers.clone(), + ) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::BlockCommitted { header, txs } => { + self.db + .handle_block_committed( + txs.clone(), + header.block_num(), + header.as_ref().clone(), + ) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + self.db.handle_transactions_reverted(tx_ids.iter().copied().collect()).await + }, + } + } + + /// Cancels an actor by its account ID. 
+ pub fn cancel_actor(&mut self, account_id: &NetworkAccountId) { + if let Some(handle) = self.actor_registry.remove(account_id) { + handle.cancel_token.cancel(); + } } /// Helper function to send an event to a single account actor. diff --git a/crates/ntx-builder/src/db/migrations.rs b/crates/ntx-builder/src/db/migrations.rs new file mode 100644 index 0000000000..f3955cb2ad --- /dev/null +++ b/crates/ntx-builder/src/db/migrations.rs @@ -0,0 +1,29 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_db::DatabaseError; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::schema_hash::verify_schema; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target: COMPONENT, migrations = migrations.len(), "Applying pending migrations"); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully, verify schema hash. + verify_schema(conn)?; + return Ok(()); + }; + tracing::warn!(target: COMPONENT, "Failed to apply migration: {e:?}"); + // Something went wrong; revert the last migration. 
+ conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql @@ -0,0 +1 @@ + diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql new file mode 100644 index 0000000000..68f3793d83 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -0,0 +1,71 @@ +-- Singleton row storing the chain tip header. +-- The chain MMR is reconstructed on startup from the store and maintained in memory. +CREATE TABLE chain_state ( + -- Singleton constraint: only one row allowed. + id INTEGER NOT NULL PRIMARY KEY CHECK (id = 0), + -- Block number of the chain tip. + block_num INTEGER NOT NULL, + -- Serialized BlockHeader. + block_header BLOB NOT NULL, + + CONSTRAINT chain_state_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) +); + +-- Account states: both committed and inflight. +-- Committed rows have transaction_id = NULL. Inflight rows have transaction_id set. +-- The auto-incrementing order_id preserves insertion order (VecDeque semantics). +CREATE TABLE accounts ( + -- Auto-incrementing ID preserves insertion order. + order_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + -- AccountId serialized bytes (8 bytes). + account_id BLOB NOT NULL, + -- Serialized Account state. + account_data BLOB NOT NULL, + -- NULL if this is the committed state; transaction ID if inflight. + transaction_id BLOB +); + +-- At most one committed row per account. +CREATE UNIQUE INDEX idx_accounts_committed ON accounts(account_id) WHERE transaction_id IS NULL; +-- At most one inflight row per (account, transaction) pair. 
+CREATE UNIQUE INDEX idx_accounts_inflight ON accounts(account_id, transaction_id) + WHERE transaction_id IS NOT NULL; +CREATE INDEX idx_accounts_account ON accounts(account_id); +CREATE INDEX idx_accounts_tx ON accounts(transaction_id) WHERE transaction_id IS NOT NULL; + +-- Notes: committed, inflight, and nullified — all in one table. +-- created_by = NULL means committed note; non-NULL means created by inflight tx. +-- consumed_by = NULL means unconsumed; non-NULL means consumed by inflight tx. +-- Row is deleted once consumption is committed. +CREATE TABLE notes ( + -- Nullifier bytes (32 bytes). Primary key. + nullifier BLOB PRIMARY KEY, + -- Target account ID. + account_id BLOB NOT NULL, + -- Serialized SingleTargetNetworkNote. + note_data BLOB NOT NULL, + -- Backoff tracking: number of failed execution attempts. + attempt_count INTEGER NOT NULL DEFAULT 0, + -- Backoff tracking: block number of the last failed attempt. NULL if never attempted. + last_attempt INTEGER, + -- NULL if the note came from a committed block; transaction ID if created by inflight tx. + created_by BLOB, + -- NULL if unconsumed; transaction ID of the consuming inflight tx. + consumed_by BLOB, + + CONSTRAINT notes_attempt_count_non_negative CHECK (attempt_count >= 0), + CONSTRAINT notes_last_attempt_is_u32 CHECK (last_attempt BETWEEN 0 AND 0xFFFFFFFF) +) WITHOUT ROWID; + +CREATE INDEX idx_notes_account ON notes(account_id); +CREATE INDEX idx_notes_created_by ON notes(created_by) WHERE created_by IS NOT NULL; +CREATE INDEX idx_notes_consumed_by ON notes(consumed_by) WHERE consumed_by IS NOT NULL; + +-- Persistent cache of note scripts, keyed by script root hash. +-- Survives restarts so scripts don't need to be re-fetched from the store. +CREATE TABLE note_scripts ( + -- Script root hash (Word serialized to 32 bytes). + script_root BLOB PRIMARY KEY, + -- Serialized NoteScript bytes. 
+ script_data BLOB NOT NULL +) WITHOUT ROWID; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs new file mode 100644 index 0000000000..37a31159b3 --- /dev/null +++ b/crates/ntx-builder/src/db/mod.rs @@ -0,0 +1,220 @@ +use std::path::PathBuf; + +use anyhow::Context; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::Word; +use miden_protocol::account::Account; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{NoteScript, Nullifier}; +use miden_protocol::transaction::TransactionId; +use miden_standards::note::AccountTargetNetworkNote; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::actor::inflight_note::InflightNetworkNote; +use crate::db::migrations::apply_migrations; +use crate::db::models::queries; + +pub(crate) mod models; + +mod migrations; +mod schema_hash; + +/// [diesel](https://diesel.rs) generated schema. +pub(crate) mod schema; + +pub type Result = std::result::Result; + +#[derive(Clone)] +pub struct Db { + inner: miden_node_db::Db, +} + +impl Db { + /// Creates and initializes the database, then opens an async connection pool. 
+ #[instrument( + target = COMPONENT, + name = "ntx_builder.database.setup", + skip_all, + fields(path=%database_filepath.display()), + err, + )] + pub async fn setup(database_filepath: PathBuf) -> anyhow::Result { + let inner = miden_node_db::Db::new(&database_filepath) + .context("failed to build connection pool")?; + + info!( + target: COMPONENT, + sqlite = %database_filepath.display(), + "Connected to the database" + ); + + let me = Db { inner }; + me.inner + .query("migrations", apply_migrations) + .await + .context("failed to apply migrations on pool connection")?; + Ok(me) + } + + // PUBLIC QUERY METHODS + // ============================================================================================ + + /// Returns `true` if there are notes available for consumption by the given account. + pub async fn has_available_notes( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_attempts: usize, + ) -> Result { + self.inner + .query("has_available_notes", move |conn| { + let notes = queries::available_notes(conn, account_id, block_num, max_attempts)?; + Ok(!notes.is_empty()) + }) + .await + } + + /// Returns the latest account state and available notes for the given account. + pub async fn select_candidate( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_note_attempts: usize, + ) -> Result<(Option, Vec)> { + self.inner + .query("select_candidate", move |conn| { + let account = queries::get_account(conn, account_id)?; + let notes = + queries::available_notes(conn, account_id, block_num, max_note_attempts)?; + Ok((account, notes)) + }) + .await + } + + /// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`. 
+ pub async fn notes_failed( + &self, + nullifiers: Vec, + block_num: BlockNumber, + ) -> Result<()> { + self.inner + .transact("notes_failed", move |conn| { + queries::notes_failed(conn, &nullifiers, block_num) + }) + .await + } + + /// Handles a `TransactionAdded` mempool event by writing effects to the DB. + pub async fn handle_transaction_added( + &self, + tx_id: TransactionId, + account_delta: Option, + notes: Vec, + nullifiers: Vec, + ) -> Result<()> { + self.inner + .transact("handle_transaction_added", move |conn| { + queries::add_transaction(conn, &tx_id, account_delta.as_ref(), ¬es, &nullifiers) + }) + .await + } + + /// Handles a `BlockCommitted` mempool event by committing transaction effects. + pub async fn handle_block_committed( + &self, + txs: Vec, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("handle_block_committed", move |conn| { + queries::commit_block(conn, &txs, block_num, &header) + }) + .await + } + + /// Handles a `TransactionsReverted` mempool event by undoing transaction effects. + /// + /// Returns the list of account IDs whose creation was reverted. + pub async fn handle_transactions_reverted( + &self, + tx_ids: Vec, + ) -> Result> { + self.inner + .transact("handle_transactions_reverted", move |conn| { + queries::revert_transaction(conn, &tx_ids) + }) + .await + } + + /// Purges all inflight state. Called on startup to get a clean slate. + pub async fn purge_inflight(&self) -> Result<()> { + self.inner.transact("purge_inflight", queries::purge_inflight).await + } + + /// Inserts or replaces the singleton chain state row. + pub async fn upsert_chain_state( + &self, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("upsert_chain_state", move |conn| { + queries::upsert_chain_state(conn, block_num, &header) + }) + .await + } + + /// Syncs an account and its notes from the store into the DB. 
+ pub async fn sync_account_from_store( + &self, + account_id: NetworkAccountId, + account: Account, + notes: Vec, + ) -> Result<()> { + self.inner + .transact("sync_account_from_store", move |conn| { + queries::upsert_committed_account(conn, account_id, &account)?; + queries::insert_committed_notes(conn, ¬es)?; + Ok(()) + }) + .await + } + + /// Looks up a cached note script by root hash. + pub async fn lookup_note_script(&self, script_root: Word) -> Result> { + self.inner + .query("lookup_note_script", move |conn| { + queries::lookup_note_script(conn, &script_root) + }) + .await + } + + /// Persists a note script to the local cache. + pub async fn insert_note_script(&self, script_root: Word, script: &NoteScript) -> Result<()> { + let script = script.clone(); + self.inner + .transact("insert_note_script", move |conn| { + queries::insert_note_script(conn, &script_root, &script) + }) + .await + } + + /// Creates a file-backed SQLite test connection with migrations applied. + #[cfg(test)] + pub fn test_conn() -> (diesel::SqliteConnection, tempfile::TempDir) { + use diesel::{Connection, SqliteConnection}; + use miden_node_db::configure_connection_on_creation; + + let dir = tempfile::tempdir().expect("failed to create temp directory"); + let db_path = dir.path().join("test.sqlite3"); + let mut conn = SqliteConnection::establish(db_path.to_str().unwrap()) + .expect("temp file sqlite should always work"); + configure_connection_on_creation(&mut conn).expect("connection configuration should work"); + apply_migrations(&mut conn).expect("migrations should apply on empty database"); + (conn, dir) + } +} diff --git a/crates/ntx-builder/src/db/models/conv.rs b/crates/ntx-builder/src/db/models/conv.rs new file mode 100644 index 0000000000..b32a292538 --- /dev/null +++ b/crates/ntx-builder/src/db/models/conv.rs @@ -0,0 +1,71 @@ +//! Conversions between Miden domain types and database column types. 
+ +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::Word; +use miden_protocol::account::{Account, AccountId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{NoteScript, Nullifier}; +use miden_protocol::transaction::TransactionId; +use miden_tx::utils::{Deserializable, Serializable}; + +// SERIALIZATION (domain → DB) +// ================================================================================================ + +pub fn account_to_bytes(account: &Account) -> Vec { + account.to_bytes() +} + +pub fn block_header_to_bytes(header: &BlockHeader) -> Vec { + header.to_bytes() +} + +pub fn network_account_id_to_bytes(id: NetworkAccountId) -> Vec { + id.inner().to_bytes() +} + +pub fn transaction_id_to_bytes(id: &TransactionId) -> Vec { + id.to_bytes() +} + +pub fn nullifier_to_bytes(nullifier: &Nullifier) -> Vec { + nullifier.to_bytes() +} + +pub fn block_num_to_i64(block_num: BlockNumber) -> i64 { + i64::from(block_num.as_u32()) +} + +#[expect(clippy::cast_sign_loss)] +pub fn block_num_from_i64(val: i64) -> BlockNumber { + BlockNumber::from(val as u32) +} + +// DESERIALIZATION (DB → domain) +// ================================================================================================ + +pub fn account_from_bytes(bytes: &[u8]) -> Result { + Account::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account", e)) +} + +pub fn account_id_from_bytes(bytes: &[u8]) -> Result { + AccountId::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account id", e)) +} + +pub fn network_account_id_from_bytes(bytes: &[u8]) -> Result { + let account_id = account_id_from_bytes(bytes)?; + NetworkAccountId::try_from(account_id) + .map_err(|e| DatabaseError::deserialization("network account id", e)) +} + +pub fn word_to_bytes(word: &Word) -> Vec { + word.to_bytes() +} + +pub fn note_script_to_bytes(script: &NoteScript) -> Vec { + script.to_bytes() +} 
+ +pub fn note_script_from_bytes(bytes: &[u8]) -> Result { + NoteScript::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("note script", e)) +} diff --git a/crates/ntx-builder/src/db/models/mod.rs b/crates/ntx-builder/src/db/models/mod.rs new file mode 100644 index 0000000000..405fe08146 --- /dev/null +++ b/crates/ntx-builder/src/db/models/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod conv; + +pub mod queries; diff --git a/crates/ntx-builder/src/db/models/queries/accounts.rs b/crates/ntx-builder/src/db/models/queries/accounts.rs new file mode 100644 index 0000000000..833f60ed8c --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/accounts.rs @@ -0,0 +1,102 @@ +//! Account-related queries and models. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::Account; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +/// Row for inserting into the unified `accounts` table. +/// +/// `transaction_id = None` means committed; `Some(tx_id_bytes)` means inflight. +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountInsert { + pub account_id: Vec, + pub account_data: Vec, + pub transaction_id: Option>, +} + +/// Row read from `accounts`. +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountRow { + pub account_data: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the committed account state (`transaction_id = NULL`). +/// +/// Deletes any existing committed row first, then inserts a fresh one. 
+/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// INSERT INTO accounts (account_id, account_data, transaction_id) +/// VALUES (?1, ?2, NULL) +/// ``` +pub fn upsert_committed_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, + account: &Account, +) -> Result<(), DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // Delete the existing committed row (if any). + diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Insert the new committed row. + let row = AccountInsert { + account_id: account_id_bytes, + account_data: conversions::account_to_bytes(account), + transaction_id: None, + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn)?; + Ok(()) +} + +/// Returns the latest account state: last inflight row (highest `order_id`), or committed if +/// none. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT account_data +/// FROM accounts +/// WHERE account_id = ?1 +/// ORDER BY order_id DESC +/// LIMIT 1 +/// ``` +pub fn get_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, +) -> Result, DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // ORDER BY order_id DESC returns the latest inflight first, then committed. + let row: Option = schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .order(schema::accounts::order_id.desc()) + .select(AccountRow::as_select()) + .first(conn) + .optional()?; + + row.map(|AccountRow { account_data, .. 
}| conversions::account_from_bytes(&account_data)) + .transpose() +} diff --git a/crates/ntx-builder/src/db/models/queries/chain_state.rs b/crates/ntx-builder/src/db/models/queries/chain_state.rs new file mode 100644 index 0000000000..9b529cadc5 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/chain_state.rs @@ -0,0 +1,46 @@ +//! Chain state queries and models. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::block::{BlockHeader, BlockNumber}; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::chain_state)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ChainStateInsert { + /// Singleton row ID. Always `0` to satisfy the `CHECK (id = 0)` constraint. + pub id: i32, + pub block_num: i64, + pub block_header: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the singleton chain state row. +/// +/// # Raw SQL +/// +/// ```sql +/// INSERT OR REPLACE INTO chain_state (id, block_num, block_header) +/// VALUES (0, ?1, ?2) +/// ``` +pub fn upsert_chain_state( + conn: &mut SqliteConnection, + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + let row = ChainStateInsert { + id: 0, + block_num: conversions::block_num_to_i64(block_num), + block_header: conversions::block_header_to_bytes(block_header), + }; + diesel::replace_into(schema::chain_state::table).values(&row).execute(conn)?; + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/mod.rs b/crates/ntx-builder/src/db/models/queries/mod.rs new file mode 100644 index 0000000000..9018c7eb34 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/mod.rs @@ -0,0 +1,324 @@ +//! 
Database query functions for the NTX builder. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; +use miden_standards::note::AccountTargetNetworkNote; +use miden_tx::utils::Serializable; + +use crate::actor::account_effect::NetworkAccountEffect; +use crate::db::models::conv as conversions; +use crate::db::schema; + +mod accounts; +pub use accounts::*; + +mod chain_state; +pub use chain_state::*; + +mod note_scripts; +pub use note_scripts::*; + +mod notes; +pub use notes::*; + +#[cfg(test)] +mod tests; + +// STARTUP QUERIES +// ================================================================================================ + +/// Purges all inflight state. Called on startup to get a clean state. +/// +/// - Deletes account rows with `transaction_id IS NOT NULL`. +/// - Deletes note rows with `created_by IS NOT NULL`. +/// - Sets `consumed_by = NULL` on notes consumed by inflight transactions. +/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE transaction_id IS NOT NULL +/// +/// DELETE FROM notes WHERE created_by IS NOT NULL +/// +/// UPDATE notes SET consumed_by = NULL WHERE consumed_by IS NOT NULL +/// ``` +pub fn purge_inflight(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + // Delete inflight account rows. + diesel::delete(schema::accounts::table.filter(schema::accounts::transaction_id.is_not_null())) + .execute(conn)?; + + // Delete inflight-created notes. + diesel::delete(schema::notes::table.filter(schema::notes::created_by.is_not_null())) + .execute(conn)?; + + // Un-nullify notes consumed by inflight transactions. 
+ diesel::update(schema::notes::table.filter(schema::notes::consumed_by.is_not_null())) + .set(schema::notes::consumed_by.eq(None::>)) + .execute(conn)?; + + Ok(()) +} + +// MEMPOOL EVENT HANDLERS +// ================================================================================================ + +/// Handles a `TransactionAdded` event by writing effects to the DB. +/// +/// # Raw SQL +/// +/// For account updates (applies delta to latest state and inserts inflight row): +/// +/// ```sql +/// -- Fetch latest account (see latest_account) +/// INSERT INTO accounts (account_id, transaction_id, account_data) +/// VALUES (?1, ?2, ?3) +/// ``` +/// +/// Per note (idempotent via `INSERT OR IGNORE`): +/// +/// ```sql +/// INSERT OR IGNORE INTO notes +/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by) +/// VALUES (?1, ?2, ?3, 0, NULL, ?4, NULL) +/// ``` +/// +/// Per nullifier (marks notes as consumed): +/// +/// ```sql +/// UPDATE notes +/// SET consumed_by = ?1 +/// WHERE nullifier = ?2 AND consumed_by IS NULL +/// ``` +pub fn add_transaction( + conn: &mut SqliteConnection, + tx_id: &TransactionId, + account_delta: Option<&AccountUpdateDetails>, + notes: &[AccountTargetNetworkNote], + nullifiers: &[Nullifier], +) -> Result<(), DatabaseError> { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Process account delta. + if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { + let account_id = update.network_account_id(); + match update { + NetworkAccountEffect::Updated(ref account_delta) => { + // Query latest_account, apply delta, insert inflight row. 
+ let current_account = + get_account(conn, account_id)?.expect("account must exist to apply delta"); + let mut updated = current_account; + updated.apply_delta(account_delta).expect( + "network account delta should apply since it was accepted by the mempool", + ); + + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(&updated), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + NetworkAccountEffect::Created(ref account) => { + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(account), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + } + } + + // Insert notes with created_by = tx_id. + // Uses INSERT OR IGNORE to make this idempotent if the same event is delivered twice + // (the nullifier PK would otherwise cause a constraint violation). + for note in notes { + let insert = NoteInsert { + nullifier: conversions::nullifier_to_bytes(¬e.as_note().nullifier()), + account_id: conversions::network_account_id_to_bytes( + note.target_account_id() + .try_into() + .expect("network note's target account must be a network account"), + ), + note_data: note.as_note().to_bytes(), + attempt_count: 0, + last_attempt: None, + created_by: Some(tx_id_bytes.clone()), + consumed_by: None, + }; + diesel::insert_or_ignore_into(schema::notes::table) + .values(&insert) + .execute(conn)?; + } + + // Mark consumed notes: set consumed_by = tx_id for matching nullifiers. + for nullifier in nullifiers { + let nullifier_bytes = conversions::nullifier_to_bytes(nullifier); + + // Only mark notes that are not already consumed. 
+ diesel::update( + schema::notes::table + .find(&nullifier_bytes) + .filter(schema::notes::consumed_by.is_null()), + ) + .set(schema::notes::consumed_by.eq(Some(&tx_id_bytes))) + .execute(conn)?; + } + + Ok(()) +} + +/// Handles a `BlockCommitted` event by committing transaction effects. +/// +/// # Raw SQL +/// +/// Per committed transaction: +/// +/// ```sql +/// -- Find inflight accounts for this tx +/// SELECT account_id FROM accounts WHERE transaction_id = ?1 +/// +/// -- Delete old committed row +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// -- Promote inflight row to committed +/// UPDATE accounts SET transaction_id = NULL +/// WHERE account_id = ?1 AND transaction_id = ?2 +/// +/// -- Delete consumed notes +/// DELETE FROM notes WHERE consumed_by = ?1 +/// +/// -- Promote inflight-created notes to committed +/// UPDATE notes SET created_by = NULL WHERE created_by = ?1 +/// ``` +/// +/// Finally updates chain state (see [`upsert_chain_state`]). +pub fn commit_block( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + for tx_id in tx_ids { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Promote inflight account rows: delete old committed, set transaction_id = NULL. + // Find accounts that have an inflight row for this tx. + let inflight_account_ids: Vec> = schema::accounts::table + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)) + .select(schema::accounts::account_id) + .load(conn)?; + + for account_id_bytes in &inflight_account_ids { + // Delete the old committed row for this account. + diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Promote the inflight row to committed (set transaction_id = NULL). + // Only promote the row for this specific tx. 
+ diesel::update( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)), + ) + .set(schema::accounts::transaction_id.eq(None::>)) + .execute(conn)?; + } + + // Delete consumed notes (consumed_by = tx_id). + diesel::delete(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes))) + .execute(conn)?; + + // Promote inflight-created notes to committed (set created_by = NULL). + diesel::update(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes))) + .set(schema::notes::created_by.eq(None::>)) + .execute(conn)?; + } + + // Update chain state. + upsert_chain_state(conn, block_num, block_header)?; + + Ok(()) +} + +/// Handles a `TransactionsReverted` event by undoing transaction effects. +/// +/// Returns the list of account IDs whose creation was reverted (no committed row exists for that +/// account after removing the inflight rows). +/// +/// # Raw SQL +/// +/// Per reverted transaction: +/// +/// ```sql +/// -- Find affected accounts +/// SELECT account_id FROM accounts WHERE transaction_id = ?1 +/// +/// -- Delete inflight account rows +/// DELETE FROM accounts WHERE transaction_id = ?1 +/// +/// -- Check if account creation was fully reverted +/// SELECT COUNT(*) FROM accounts WHERE account_id = ?1 +/// +/// -- Delete inflight-created notes +/// DELETE FROM notes WHERE created_by = ?1 +/// +/// -- Restore consumed notes +/// UPDATE notes SET consumed_by = NULL WHERE consumed_by = ?1 +/// ``` +pub fn revert_transaction( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], +) -> Result, DatabaseError> { + let mut reverted_accounts = Vec::new(); + + for tx_id in tx_ids { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Find accounts affected by this transaction. 
+ let affected_account_ids: Vec> = schema::accounts::table + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)) + .select(schema::accounts::account_id) + .load(conn)?; + + // Delete inflight account rows for this tx. + diesel::delete( + schema::accounts::table.filter(schema::accounts::transaction_id.eq(&tx_id_bytes)), + ) + .execute(conn)?; + + // Check if any affected accounts had their creation fully reverted + // (no committed row and no remaining inflight rows). + for account_id_bytes in &affected_account_ids { + let remaining: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .count() + .get_result(conn)?; + + if remaining == 0 { + let account_id = conversions::network_account_id_from_bytes(account_id_bytes)?; + reverted_accounts.push(account_id); + } + } + + // Delete inflight-created notes (created_by = tx_id). + diesel::delete(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes))) + .execute(conn)?; + + // Un-nullify consumed notes (set consumed_by = NULL where consumed_by = tx_id). + diesel::update(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes))) + .set(schema::notes::consumed_by.eq(None::>)) + .execute(conn)?; + } + + Ok(reverted_accounts) +} diff --git a/crates/ntx-builder/src/db/models/queries/note_scripts.rs b/crates/ntx-builder/src/db/models/queries/note_scripts.rs new file mode 100644 index 0000000000..09c03e4c1e --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/note_scripts.rs @@ -0,0 +1,56 @@ +//! Database queries for persisting and retrieving note scripts. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::Word; +use miden_protocol::note::NoteScript; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +#[derive(Insertable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptInsert { + script_root: Vec, + script_data: Vec, +} + +#[derive(Queryable, Selectable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptRow { + script_data: Vec, +} + +/// Looks up a note script by its root hash. +pub fn lookup_note_script( + conn: &mut SqliteConnection, + script_root: &Word, +) -> Result, DatabaseError> { + let root_bytes = conversions::word_to_bytes(script_root); + + let row: Option = schema::note_scripts::table + .find(root_bytes) + .select(NoteScriptRow::as_select()) + .first(conn) + .optional()?; + + row.map(|r| conversions::note_script_from_bytes(&r.script_data)).transpose() +} + +/// Inserts a note script (idempotent via INSERT OR IGNORE). +pub fn insert_note_script( + conn: &mut SqliteConnection, + script_root: &Word, + script: &NoteScript, +) -> Result<(), DatabaseError> { + let insert = NoteScriptInsert { + script_root: conversions::word_to_bytes(script_root), + script_data: conversions::note_script_to_bytes(script), + }; + + diesel::insert_or_ignore_into(schema::note_scripts::table) + .values(&insert) + .execute(conn)?; + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/notes.rs b/crates/ntx-builder/src/db/models/queries/notes.rs new file mode 100644 index 0000000000..b512e57bc2 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/notes.rs @@ -0,0 +1,175 @@ +//! Note-related queries and models. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{Note, Nullifier}; +use miden_standards::note::AccountTargetNetworkNote; +use miden_tx::utils::{Deserializable, Serializable}; + +use crate::actor::inflight_note::InflightNetworkNote; +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +/// Row read from the unified `notes` table. +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name = schema::notes)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct NoteRow { + pub note_data: Vec, + pub attempt_count: i32, + pub last_attempt: Option, +} + +/// Row for inserting into the unified `notes` table. +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::notes)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct NoteInsert { + pub nullifier: Vec, + pub account_id: Vec, + pub note_data: Vec, + pub attempt_count: i32, + pub last_attempt: Option, + pub created_by: Option>, + pub consumed_by: Option>, +} + +// QUERIES +// ================================================================================================ + +/// Batch inserts committed notes (`created_by = NULL`, `consumed_by = NULL`). 
+/// +/// # Raw SQL +/// +/// Per note: +/// +/// ```sql +/// INSERT OR REPLACE INTO notes +/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by) +/// VALUES (?1, ?2, ?3, 0, NULL, NULL, NULL) +/// ``` +pub fn insert_committed_notes( + conn: &mut SqliteConnection, + notes: &[AccountTargetNetworkNote], +) -> Result<(), DatabaseError> { + for note in notes { + let row = NoteInsert { + nullifier: conversions::nullifier_to_bytes(¬e.as_note().nullifier()), + account_id: conversions::network_account_id_to_bytes( + NetworkAccountId::try_from(note.target_account_id()) + .expect("account ID of a network note should be a network account"), + ), + note_data: note.as_note().to_bytes(), + attempt_count: 0, + last_attempt: None, + created_by: None, + consumed_by: None, + }; + diesel::replace_into(schema::notes::table).values(&row).execute(conn)?; + } + Ok(()) +} + +/// Returns notes available for consumption by a given account. +/// +/// Queries unconsumed notes (`consumed_by IS NULL`) for the account that have not exceeded the +/// maximum attempt count, then applies backoff filtering in Rust via +/// `InflightNetworkNote::is_available`. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT note_data, attempt_count, last_attempt +/// FROM notes +/// WHERE +/// account_id = ?1 +/// AND consumed_by IS NULL +/// AND attempt_count < ?2 +/// ``` +#[expect(clippy::cast_possible_wrap)] +pub fn available_notes( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_attempts: usize, +) -> Result, DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // Get unconsumed notes for this account that haven't exceeded the max attempt count. 
+ let rows: Vec = schema::notes::table + .filter(schema::notes::account_id.eq(&account_id_bytes)) + .filter(schema::notes::consumed_by.is_null()) + .filter(schema::notes::attempt_count.lt(max_attempts as i32)) + .select(NoteRow::as_select()) + .load(conn)?; + + let mut result = Vec::new(); + for row in rows { + #[expect(clippy::cast_sign_loss)] + let attempt_count = row.attempt_count as usize; + let note = note_row_to_inflight( + &row.note_data, + attempt_count, + row.last_attempt.map(conversions::block_num_from_i64), + )?; + if note.is_available(block_num) { + result.push(note); + } + } + + Ok(result) +} + +/// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`. +/// +/// # Raw SQL +/// +/// Per nullifier: +/// +/// ```sql +/// UPDATE notes +/// SET attempt_count = attempt_count + 1, last_attempt = ?1 +/// WHERE nullifier = ?2 +/// ``` +pub fn notes_failed( + conn: &mut SqliteConnection, + nullifiers: &[Nullifier], + block_num: BlockNumber, +) -> Result<(), DatabaseError> { + let block_num_val = conversions::block_num_to_i64(block_num); + + for nullifier in nullifiers { + let nullifier_bytes = conversions::nullifier_to_bytes(nullifier); + + diesel::update(schema::notes::table.find(&nullifier_bytes)) + .set(( + schema::notes::attempt_count.eq(schema::notes::attempt_count + 1), + schema::notes::last_attempt.eq(Some(block_num_val)), + )) + .execute(conn)?; + } + Ok(()) +} + +// HELPERS +// ================================================================================================ + +/// Constructs an `InflightNetworkNote` from DB row fields. 
+fn note_row_to_inflight( + note_data: &[u8], + attempt_count: usize, + last_attempt: Option, +) -> Result { + let note = Note::read_from_bytes(note_data) + .map_err(|source| DatabaseError::deserialization("failed to parse note", source))?; + let note = AccountTargetNetworkNote::new(note).map_err(|source| { + DatabaseError::deserialization("failed to convert to network note", source) + })?; + + Ok(InflightNetworkNote::from_parts(note, attempt_count, last_attempt)) +} diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs new file mode 100644 index 0000000000..83c62426b7 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -0,0 +1,578 @@ +//! DB-level tests for NTX builder query functions. + +use diesel::prelude::*; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::Word; +use miden_protocol::account::{ + AccountComponentMetadata, + AccountId, + AccountStorageMode, + AccountType, +}; +use miden_protocol::block::BlockNumber; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE, + AccountIdBuilder, +}; +use miden_protocol::transaction::TransactionId; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint}; +use miden_standards::testing::note::NoteBuilder; +use rand_chacha::ChaCha20Rng; +use rand_chacha::rand_core::SeedableRng; + +use super::*; +use crate::db::models::conv as conversions; +use crate::db::{Db, schema}; + +// TEST HELPERS +// ================================================================================================ + +/// Creates a file-backed SQLite connection with migrations applied. +fn test_conn() -> (SqliteConnection, tempfile::TempDir) { + Db::test_conn() +} + +/// Creates a network account ID from a test constant. 
+fn mock_network_account_id() -> NetworkAccountId { + let account_id: AccountId = + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE.try_into().unwrap(); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a distinct network account ID using a seeded RNG. +fn mock_network_account_id_seeded(seed: u8) -> NetworkAccountId { + let account_id = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .build_with_seed([seed; 32]); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a unique `TransactionId` from a seed value. +fn mock_tx_id(seed: u64) -> TransactionId { + let w = |n: u64| Word::try_from([n, 0, 0, 0]).unwrap(); + TransactionId::new(w(seed), w(seed + 1), w(seed + 2), w(seed + 3)) +} + +/// Creates a `SingleTargetNetworkNote` targeting the given network account. +fn mock_single_target_note( + network_account_id: NetworkAccountId, + seed: u8, +) -> AccountTargetNetworkNote { + let mut rng = ChaCha20Rng::from_seed([seed; 32]); + let sender = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Private) + .build_with_rng(&mut rng); + + let target = NetworkAccountTarget::new(network_account_id.inner(), NoteExecutionHint::Always) + .expect("network account should be valid target"); + + let note = NoteBuilder::new(sender, rng).attachment(target).build().unwrap(); + + AccountTargetNetworkNote::new(note).expect("note should be single-target network note") +} + +/// Counts the total number of rows in the `notes` table. +fn count_notes(conn: &mut SqliteConnection) -> i64 { + schema::notes::table.count().get_result(conn).unwrap() +} + +/// Counts the total number of rows in the `accounts` table. +fn count_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table.count().get_result(conn).unwrap() +} + +/// Counts inflight account rows. 
+fn count_inflight_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_not_null()) + .count() + .get_result(conn) + .unwrap() +} + +/// Counts committed account rows. +fn count_committed_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_null()) + .count() + .get_result(conn) + .unwrap() +} + +// PURGE INFLIGHT TESTS +// ================================================================================================ + +#[test] +fn purge_inflight_clears_all_inflight_state() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert committed account. + upsert_committed_account(conn, account_id, &mock_account(account_id)).unwrap(); + + // Insert a transaction (creates inflight account row + note + consumption). + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + assert!(count_inflight_accounts(conn) == 0); // No account delta, so no inflight account. + assert_eq!(count_notes(conn), 1); + + // Mark note as consumed by another tx. + let tx_id2 = mock_tx_id(2); + add_transaction(conn, &tx_id2, None, &[], &[note.as_note().nullifier()]).unwrap(); + + // Verify consumed_by is set. + let consumed_count: i64 = schema::notes::table + .filter(schema::notes::consumed_by.is_not_null()) + .count() + .get_result(conn) + .unwrap(); + assert_eq!(consumed_count, 1); + + // Purge inflight state. + purge_inflight(conn).unwrap(); + + // Inflight accounts should be gone. + assert_eq!(count_inflight_accounts(conn), 0); + // Committed account should remain. + assert_eq!(count_committed_accounts(conn), 1); + // Inflight-created notes should be gone. 
+ assert_eq!(count_notes(conn), 0); +} + +// HANDLE TRANSACTION ADDED TESTS +// ================================================================================================ + +#[test] +fn transaction_added_inserts_notes_and_marks_consumed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note1 = mock_single_target_note(account_id, 10); + let note2 = mock_single_target_note(account_id, 20); + + // Insert committed note first (to test consumption). + insert_committed_notes(conn, std::slice::from_ref(¬e1)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Add transaction that creates note2 and consumes note1. + add_transaction( + conn, + &tx_id, + None, + std::slice::from_ref(¬e2), + &[note1.as_note().nullifier()], + ) + .unwrap(); + + // Should now have 2 notes total. + assert_eq!(count_notes(conn), 2); + + // note1 should be consumed. + let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e1.as_note().nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // note2 should have created_by set. + let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e2.as_note().nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); +} + +#[test] +fn transaction_added_is_idempotent_for_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert the same transaction twice. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + // Should only have one note (INSERT OR IGNORE). 
+ assert_eq!(count_notes(conn), 1); +} + +// HANDLE BLOCK COMMITTED TESTS +// ================================================================================================ + +#[test] +fn block_committed_promotes_inflight_notes_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + + // Add a transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + + // Verify created_by is set. + let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); + + // Commit the block. + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // created_by should now be NULL (promoted to committed). + let created: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_none()); +} + +#[test] +fn block_committed_deletes_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert a committed note. + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.as_note().nullifier()]).unwrap(); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Consumed note should be deleted. 
+ assert_eq!(count_notes(conn), 0); +} + +#[test] +fn block_committed_promotes_inflight_account_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + + // Insert committed account. + upsert_committed_account(conn, account_id, &account).unwrap(); + assert_eq!(count_committed_accounts(conn), 1); + + // Insert inflight row. + let tx_id = mock_tx_id(1); + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + assert_eq!(count_inflight_accounts(conn), 1); + assert_eq!(count_committed_accounts(conn), 1); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Should have 1 committed and 0 inflight. + assert_eq!(count_committed_accounts(conn), 1); + assert_eq!(count_inflight_accounts(conn), 0); +} + +// HANDLE TRANSACTIONS REVERTED TESTS +// ================================================================================================ + +#[test] +fn transactions_reverted_restores_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert committed note. + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.as_note().nullifier()]).unwrap(); + + // Verify consumed. 
+ let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // Revert the transaction. + let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert!(reverted.is_empty()); + + // Note should be un-consumed. + let consumed: Option> = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_none()); +} + +#[test] +fn transactions_reverted_deletes_inflight_created_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Add transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(¬e), &[]).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Revert the transaction. + revert_transaction(conn, &[tx_id]).unwrap(); + + // Inflight-created note should be deleted. + assert_eq!(count_notes(conn), 0); +} + +#[test] +fn transactions_reverted_reports_reverted_account_creations() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + let tx_id = mock_tx_id(1); + + // Insert an inflight account row (simulating account creation by tx). + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + // Revert the transaction --- account creation should be reported. 
+ let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert_eq!(reverted.len(), 1); + assert_eq!(reverted[0], account_id); + + // Account should be gone. + assert_eq!(count_accounts(conn), 0); +} + +// AVAILABLE NOTES TESTS +// ================================================================================================ + +#[test] +fn available_notes_filters_consumed_and_exceeded_attempts() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note_good = mock_single_target_note(account_id, 10); + let note_consumed = mock_single_target_note(account_id, 20); + let note_failed = mock_single_target_note(account_id, 30); + + // Insert all as committed. + insert_committed_notes(conn, &[note_good.clone(), note_consumed.clone(), note_failed.clone()]) + .unwrap(); + + // Consume one note. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note_consumed.as_note().nullifier()]).unwrap(); + + // Mark one note as failed many times (exceed max_attempts=3). + let block_num = BlockNumber::from(100u32); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.as_note().nullifier()], block_num).unwrap(); + + // Query available notes with max_attempts=3. + let result = available_notes(conn, account_id, block_num, 3).unwrap(); + + // Only note_good should be available (note_consumed is consumed, note_failed exceeded + // attempts). 
+ assert_eq!(result.len(), 1); + assert_eq!(result[0].nullifier(), note_good.as_note().nullifier()); +} + +#[test] +fn available_notes_only_returns_notes_for_specified_account() { + let (conn, _dir) = &mut test_conn(); + + let account_id_1 = mock_network_account_id(); + let account_id_2 = mock_network_account_id_seeded(42); + + let note_acct1 = mock_single_target_note(account_id_1, 10); + let note_acct2 = mock_single_target_note(account_id_2, 20); + + insert_committed_notes(conn, &[note_acct1.clone(), note_acct2]).unwrap(); + + let block_num = BlockNumber::from(100u32); + let result = available_notes(conn, account_id_1, block_num, 30).unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].nullifier(), note_acct1.as_note().nullifier()); +} + +// NOTES FAILED TESTS +// ================================================================================================ + +#[test] +fn notes_failed_increments_attempt_count() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + insert_committed_notes(conn, std::slice::from_ref(¬e)).unwrap(); + + let block_num = BlockNumber::from(5u32); + notes_failed(conn, &[note.as_note().nullifier()], block_num).unwrap(); + notes_failed(conn, &[note.as_note().nullifier()], block_num).unwrap(); + + let (attempt_count, last_attempt): (i32, Option) = schema::notes::table + .find(conversions::nullifier_to_bytes(¬e.as_note().nullifier())) + .select((schema::notes::attempt_count, schema::notes::last_attempt)) + .first(conn) + .unwrap(); + + assert_eq!(attempt_count, 2); + assert_eq!(last_attempt, Some(conversions::block_num_to_i64(block_num))); +} + +// CHAIN STATE TESTS +// ================================================================================================ + +#[test] +fn upsert_chain_state_updates_singleton() { + let (conn, _dir) = &mut test_conn(); + + let block_num_1 = BlockNumber::from(1u32); + let header_1 = 
mock_block_header(block_num_1); + upsert_chain_state(conn, block_num_1, &header_1).unwrap(); + + // Upsert again with higher block. + let block_num_2 = BlockNumber::from(2u32); + let header_2 = mock_block_header(block_num_2); + upsert_chain_state(conn, block_num_2, &header_2).unwrap(); + + // Should only have one row. + let row_count: i64 = schema::chain_state::table.count().get_result(conn).unwrap(); + assert_eq!(row_count, 1); + + // Should have the latest block number. + let stored_block_num: i64 = schema::chain_state::table + .select(schema::chain_state::block_num) + .first(conn) + .unwrap(); + assert_eq!(stored_block_num, conversions::block_num_to_i64(block_num_2)); +} + +// NOTE SCRIPT TESTS +// ================================================================================================ + +#[test] +fn note_script_insert_and_lookup() { + let (conn, _dir) = &mut test_conn(); + + // Extract a NoteScript from a mock note. + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into_note(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the script. + insert_note_script(conn, &root, &script).unwrap(); + + // Look it up — should match the original. 
+ let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); + assert_eq!(found.unwrap().root(), script.root()); +} + +#[test] +fn note_script_lookup_returns_none_for_missing() { + let (conn, _dir) = &mut test_conn(); + + let missing_root = Word::default(); + let found = lookup_note_script(conn, &missing_root).unwrap(); + assert!(found.is_none()); +} + +#[test] +fn note_script_insert_is_idempotent() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into_note(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the same script twice — should not error. + insert_note_script(conn, &root, &script).unwrap(); + insert_note_script(conn, &root, &script).unwrap(); + + // Should still be retrievable. + let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); +} + +// HELPERS (domain type construction) +// ================================================================================================ + +/// Creates a mock `Account` for a network account. +/// +/// Uses `AccountBuilder` with minimal components needed for serialization. 
+fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Account { + use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; + use miden_protocol::account::{AccountBuilder, AccountComponent}; + use miden_standards::account::auth::AuthSingleSig; + + let component_code = miden_standards::code_builder::CodeBuilder::default() + .compile_component_code("test::interface", "pub proc test_proc push.1.2 add end") + .unwrap(); + + let component = AccountComponent::new( + component_code, + vec![], + AccountComponentMetadata::mock("test").with_supports_all_types(), + ) + .unwrap(); + + AccountBuilder::new([0u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(Word::default()), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap() +} + +/// Creates a mock `BlockHeader` for the given block number. +fn mock_block_header(block_num: BlockNumber) -> miden_protocol::block::BlockHeader { + miden_protocol::block::BlockHeader::mock(block_num, None, None, &[], Word::default()) +} diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs new file mode 100644 index 0000000000..93dca8ce5e --- /dev/null +++ b/crates/ntx-builder/src/db/schema.rs @@ -0,0 +1,39 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + accounts (order_id) { + order_id -> Integer, + account_id -> Binary, + account_data -> Binary, + transaction_id -> Nullable, + } +} + +diesel::table! { + chain_state (id) { + id -> Integer, + block_num -> BigInt, + block_header -> Binary, + } +} + +diesel::table! { + note_scripts (script_root) { + script_root -> Binary, + script_data -> Binary, + } +} + +diesel::table! 
{ + notes (nullifier) { + nullifier -> Binary, + account_id -> Binary, + note_data -> Binary, + attempt_count -> Integer, + last_attempt -> Nullable, + created_by -> Nullable, + consumed_by -> Nullable, + } +} + +diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, note_scripts, notes,); diff --git a/crates/ntx-builder/src/db/schema_hash.rs b/crates/ntx-builder/src/db/schema_hash.rs new file mode 100644 index 0000000000..80d00b4c47 --- /dev/null +++ b/crates/ntx-builder/src/db/schema_hash.rs @@ -0,0 +1,191 @@ +//! Schema verification to detect database schema changes. +//! +//! Detects: +//! +//! - Direct modifications to the database schema outside of migrations +//! - Running a node against a database created with different set of migrations +//! - Forgetting to reset the database after schema changes i.e. for a specific migration +//! +//! The verification works by creating an in-memory reference database, applying all +//! migrations to it, and comparing its schema against the actual database schema. + +use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::MIGRATIONS; + +/// Represents a schema object for comparison. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +struct SchemaObject { + object_type: String, + name: String, + sql: String, +} + +/// Represents a row from the `sqlite_schema` table. +#[derive(diesel::QueryableByName, Debug)] +struct SqliteSchemaRow { + #[diesel(sql_type = diesel::sql_types::Text)] + schema_type: String, + #[diesel(sql_type = diesel::sql_types::Text)] + name: String, + #[diesel(sql_type = diesel::sql_types::Nullable)] + sql: Option, +} + +/// Extracts all schema objects from a database connection. 
+fn extract_schema( + conn: &mut SqliteConnection, +) -> Result, SchemaVerificationError> { + let rows: Vec = diesel::sql_query( + "SELECT type as schema_type, name, sql FROM sqlite_schema \ + WHERE type IN ('table', 'index') \ + AND name NOT LIKE 'sqlite_%' \ + AND name NOT LIKE '__diesel_%' \ + ORDER BY type, name", + ) + .load(conn) + .map_err(SchemaVerificationError::SchemaExtraction)?; + + let mut objects: Vec = rows + .into_iter() + .filter_map(|row| { + row.sql.map(|sql| SchemaObject { + object_type: row.schema_type, + name: row.name, + sql, + }) + }) + .collect(); + + objects.sort(); + Ok(objects) +} + +/// Computes the expected schema by applying migrations to an in-memory database. +fn compute_expected_schema() -> Result, SchemaVerificationError> { + let mut conn = SqliteConnection::establish(":memory:") + .map_err(SchemaVerificationError::InMemoryDbCreation)?; + + conn.run_pending_migrations(MIGRATIONS) + .map_err(SchemaVerificationError::MigrationApplication)?; + + extract_schema(&mut conn) +} + +/// Verifies that the database schema matches the expected schema. +/// +/// Creates an in-memory database, applies all migrations, and compares schemas. +/// +/// # Errors +/// +/// Returns `SchemaVerificationError::Mismatch` if schemas differ. +#[instrument(level = "info", target = COMPONENT, skip_all, err)] +pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> { + let expected = compute_expected_schema()?; + let actual = extract_schema(conn)?; + + if actual != expected { + let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect(); + let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect(); + + // Find differences for better error messages. 
+ let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect(); + let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect(); + + tracing::error!( + target: COMPONENT, + ?expected_names, + ?actual_names, + missing_count = missing.len(), + extra_count = extra.len(), + "Database schema mismatch detected" + ); + + // Log specific differences at debug level. + for obj in &missing { + tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + } + for obj in &extra { + tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + } + + return Err(SchemaVerificationError::Mismatch { + expected_count: expected.len(), + actual_count: actual.len(), + missing_count: missing.len(), + extra_count: extra.len(), + }); + } + + tracing::info!( + target: COMPONENT, + objects = expected.len(), + "Database schema verification passed" + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use miden_node_db::DatabaseError; + + use super::*; + use crate::db::migrations::apply_migrations; + + #[test] + fn verify_schema_passes_for_correct_schema() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + verify_schema(&mut conn).expect("Should pass for correct schema"); + } + + #[test] + fn verify_schema_fails_for_added_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE rogue_table (id INTEGER PRIMARY KEY)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. 
}) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE notes").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 62088ce6cc..02c9f547ce 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,9 +1,24 @@ use std::num::NonZeroUsize; +use std::path::PathBuf; +use std::sync::Arc; + +use actor::AccountActorContext; +use anyhow::Context; +use block_producer::BlockProducerClient; +use builder::{ChainState, MempoolEventStream}; +use coordinator::Coordinator; +use db::Db; +use futures::TryStreamExt; +use miden_node_utils::lru_cache::LruCache; +use store::StoreClient; +use tokio::sync::{RwLock, mpsc}; +use url::Url; mod actor; mod block_producer; mod builder; mod coordinator; +pub(crate) mod db; mod store; pub use builder::NetworkTransactionBuilder; @@ -13,12 +28,251 @@ pub use builder::NetworkTransactionBuilder; const COMPONENT: &str = "miden-ntx-builder"; -/// Maximum number of network notes a network transaction is allowed to consume. 
-const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).unwrap(); -const _: () = assert!(MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); +/// Default maximum number of network notes a network transaction is allowed to consume. +const DEFAULT_MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).expect("literal is non-zero"); +const _: () = assert!(DEFAULT_MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); -/// Maximum number of network transactions which should be in progress concurrently. +/// Default maximum number of network transactions which should be in progress concurrently. /// /// This only counts transactions which are being computed locally and does not include /// uncommitted transactions in the mempool. -const MAX_IN_PROGRESS_TXS: usize = 4; +const DEFAULT_MAX_CONCURRENT_TXS: usize = 4; + +/// Default maximum number of blocks to keep in the chain MMR. +const DEFAULT_MAX_BLOCK_COUNT: usize = 4; + +/// Default channel capacity for account loading from the store. +const DEFAULT_ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; + +/// Default channel size for actor event channels. +const DEFAULT_ACTOR_CHANNEL_SIZE: usize = 100; + +/// Default maximum number of attempts to execute a failing note before dropping it. +const DEFAULT_MAX_NOTE_ATTEMPTS: usize = 30; + +/// Default script cache size. +const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = + NonZeroUsize::new(1_000).expect("literal is non-zero"); + +// CONFIGURATION +// ================================================================================================= + +/// Configuration for the Network Transaction Builder. +/// +/// This struct contains all the settings needed to create and run a `NetworkTransactionBuilder`. +#[derive(Debug, Clone)] +pub struct NtxBuilderConfig { + /// Address of the store gRPC server (ntx-builder API). + pub store_url: Url, + + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + + /// Address of the validator gRPC server. 
+ pub validator_url: Url, + + /// Address of the remote transaction prover. If `None`, transactions will be proven locally. + pub tx_prover_url: Option, + + /// Size of the LRU cache for note scripts. Scripts are fetched from the store and cached + /// to avoid repeated gRPC calls. + pub script_cache_size: NonZeroUsize, + + /// Maximum number of network transactions which should be in progress concurrently across + /// all account actors. + pub max_concurrent_txs: usize, + + /// Maximum number of network notes a single transaction is allowed to consume. + pub max_notes_per_tx: NonZeroUsize, + + /// Maximum number of attempts to execute a failing note before dropping it. + /// Notes use exponential backoff between attempts. + pub max_note_attempts: usize, + + /// Maximum number of blocks to keep in the chain MMR. Older blocks are pruned. + pub max_block_count: usize, + + /// Channel capacity for loading accounts from the store during startup. + pub account_channel_capacity: usize, + + /// Channel size for each actor's event channel. + pub actor_channel_size: usize, + + /// Path to the SQLite database file used for persistent state. + pub database_filepath: PathBuf, +} + +impl NtxBuilderConfig { + pub fn new( + store_url: Url, + block_producer_url: Url, + validator_url: Url, + database_filepath: PathBuf, + ) -> Self { + Self { + store_url, + block_producer_url, + validator_url, + tx_prover_url: None, + script_cache_size: DEFAULT_SCRIPT_CACHE_SIZE, + max_concurrent_txs: DEFAULT_MAX_CONCURRENT_TXS, + max_notes_per_tx: DEFAULT_MAX_NOTES_PER_TX, + max_note_attempts: DEFAULT_MAX_NOTE_ATTEMPTS, + max_block_count: DEFAULT_MAX_BLOCK_COUNT, + account_channel_capacity: DEFAULT_ACCOUNT_CHANNEL_CAPACITY, + actor_channel_size: DEFAULT_ACTOR_CHANNEL_SIZE, + database_filepath, + } + } + + /// Sets the remote transaction prover URL. + /// + /// If not set, transactions will be proven locally. 
+ #[must_use] + pub fn with_tx_prover_url(mut self, url: Option) -> Self { + self.tx_prover_url = url; + self + } + + /// Sets the script cache size. + #[must_use] + pub fn with_script_cache_size(mut self, size: NonZeroUsize) -> Self { + self.script_cache_size = size; + self + } + + /// Sets the maximum number of concurrent transactions. + #[must_use] + pub fn with_max_concurrent_txs(mut self, max: usize) -> Self { + self.max_concurrent_txs = max; + self + } + + /// Sets the maximum number of notes per transaction. + /// + /// # Panics + /// + /// Panics if `max` exceeds `miden_tx::MAX_NUM_CHECKER_NOTES`. + #[must_use] + pub fn with_max_notes_per_tx(mut self, max: NonZeroUsize) -> Self { + assert!( + max.get() <= miden_tx::MAX_NUM_CHECKER_NOTES, + "max_notes_per_tx ({}) exceeds MAX_NUM_CHECKER_NOTES ({})", + max, + miden_tx::MAX_NUM_CHECKER_NOTES + ); + self.max_notes_per_tx = max; + self + } + + /// Sets the maximum number of note execution attempts. + #[must_use] + pub fn with_max_note_attempts(mut self, max: usize) -> Self { + self.max_note_attempts = max; + self + } + + /// Sets the maximum number of blocks to keep in the chain MMR. + #[must_use] + pub fn with_max_block_count(mut self, max: usize) -> Self { + self.max_block_count = max; + self + } + + /// Sets the account channel capacity for startup loading. + #[must_use] + pub fn with_account_channel_capacity(mut self, capacity: usize) -> Self { + self.account_channel_capacity = capacity; + self + } + + /// Sets the actor event channel size. + #[must_use] + pub fn with_actor_channel_size(mut self, size: usize) -> Self { + self.actor_channel_size = size; + self + } + + /// Builds and initializes the network transaction builder. + /// + /// This method connects to the store and block producer services, fetches the current + /// chain tip, and subscribes to mempool events. 
+ /// + /// # Errors + /// + /// Returns an error if: + /// - The store connection fails + /// - The mempool subscription fails (after retries) + /// - The store contains no blocks (not bootstrapped) + pub async fn build(self) -> anyhow::Result { + // Set up the database (bootstrap + connection pool). + let db = Db::setup(self.database_filepath.clone()).await?; + + // Purge inflight state from previous run. + db.purge_inflight().await.context("failed to purge inflight state")?; + + let script_cache = LruCache::new(self.script_cache_size); + let coordinator = + Coordinator::new(self.max_concurrent_txs, self.actor_channel_size, db.clone()); + + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + let (chain_tip_header, chain_mmr, mempool_events) = loop { + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .context("store should contain a latest block")?; + + match block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + { + Ok(subscription) => { + let stream: MempoolEventStream = Box::pin(subscription.into_stream()); + break (chain_tip_header, chain_mmr, stream); + }, + Err(status) if status.code() == tonic::Code::InvalidArgument => { + tracing::warn!( + err = %status, + "mempool subscription failed due to chain tip desync, retrying" + ); + }, + Err(err) => return Err(err).context("failed to subscribe to mempool events"), + } + }; + + // Store the chain tip in the DB. 
+ db.upsert_chain_state(chain_tip_header.block_num(), chain_tip_header.clone()) + .await + .context("failed to upsert chain state")?; + + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let (notification_tx, notification_rx) = mpsc::channel(1); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache, + max_notes_per_tx: self.max_notes_per_tx, + max_note_attempts: self.max_note_attempts, + db: db.clone(), + notification_tx, + }; + + Ok(NetworkTransactionBuilder::new( + self, + coordinator, + store, + db, + chain_state, + actor_context, + mempool_events, + notification_rx, + )) + } +} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 42a418cc29..1f8c7b5f72 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -4,7 +4,6 @@ use std::time::Duration; use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; use miden_node_proto::domain::account::{AccountDetails, AccountResponse, NetworkAccountId}; -use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::{self as proto}; @@ -17,6 +16,7 @@ use miden_protocol::account::{ AccountId, PartialAccount, PartialStorage, + StorageMapKey, StorageMapWitness, StorageSlotName, }; @@ -26,6 +26,7 @@ use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::note::NoteScript; use miden_protocol::transaction::AccountInputs; +use miden_standards::note::AccountTargetNetworkNote; use miden_tx::utils::{Deserializable, Serializable}; use thiserror::Error; use tracing::{info, instrument}; @@ -196,7 +197,7 @@ impl 
StoreClient { &self, network_account_id: NetworkAccountId, block_num: u32, - ) -> Result, StoreError> { + ) -> Result, StoreError> { // Upper bound of each note is ~10KB. Limit page size to ~10MB. const PAGE_SIZE: u64 = 1024; @@ -215,7 +216,7 @@ impl StoreClient { all_notes.reserve(resp.notes.len()); for note in resp.notes { - all_notes.push(NetworkNote::try_from(note)?); + all_notes.push(AccountTargetNetworkNote::try_from(note)?); } match resp.next_token { @@ -236,10 +237,10 @@ impl StoreClient { &self, sender: tokio::sync::mpsc::Sender, ) -> Result<(), StoreError> { - let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let mut block_range = BlockNumber::GENESIS..=BlockNumber::MAX; while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? { - block_range = next_start..=BlockNumber::from(u32::MAX); + block_range = next_start..=BlockNumber::MAX; } Ok(()) @@ -365,7 +366,7 @@ impl StoreClient { &self, root: Word, ) -> Result, StoreError> { - let request = proto::note::NoteRoot { root: Some(root.into()) }; + let request = proto::note::NoteScriptRoot { root: Some(root.into()) }; let script = self.inner.clone().get_note_script_by_root(request).await?.into_inner().script; @@ -421,7 +422,7 @@ impl StoreClient { &self, account_id: AccountId, slot_name: StorageSlotName, - map_key: Word, + map_key: StorageMapKey, block_num: Option, ) -> Result { // Construct proto request. 
diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 6d3589ca3d..b0a7461d3c 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -33,7 +33,14 @@ assert_matches = { workspace = true } proptest = { version = "1.7" } [build-dependencies] -fs-err = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { version = "7.6" } -tonic-prost-build = { workspace = true } +build-rs = { workspace = true } +fs-err = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { version = "7.6" } +tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/crates/proto/build.rs b/crates/proto/build.rs index b0ac773a72..07117f4669 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,5 +1,4 @@ -use std::env; -use std::path::{Path, PathBuf}; +use std::path::Path; use fs_err as fs; use miden_node_proto_build::{ @@ -14,27 +13,14 @@ use miden_node_proto_build::{ use miette::{Context, IntoDiagnostic}; use tonic_prost_build::FileDescriptorSet; -/// Generates Rust protobuf bindings using miden-node-proto-build. -/// -/// This is done only if `BUILD_PROTO` environment variable is set to `1` to avoid running the -/// script on crates.io where repo-level .proto files are not available. +/// Generates Rust protobuf bindings using `miden-node-proto-build`. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-changed=../../proto/proto"); - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); - // Skip this build script in BUILD_PROTO environment variable is not set to `1`. 
- if env::var("BUILD_PROTO").unwrap_or("0".to_string()) == "0" { - return Ok(()); - } - - let crate_root: PathBuf = - env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR should be set").into(); - let dst_dir = crate_root.join("src").join("generated"); + let dst_dir = build_rs::input::out_dir().join("generated"); // Remove all existing files. - fs::remove_dir_all(&dst_dir) - .into_diagnostic() - .wrap_err("removing existing files")?; + let _ = fs::remove_dir_all(&dst_dir); fs::create_dir(&dst_dir) .into_diagnostic() .wrap_err("creating destination folder")?; @@ -69,12 +55,12 @@ fn generate_bindings(file_descriptors: FileDescriptorSet, dst_dir: &Path) -> mie } /// Generate `mod.rs` which includes all files in the folder as submodules. -fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { - let mod_filepath = directory.as_ref().join("mod.rs"); +fn generate_mod_rs(dst_dir: impl AsRef) -> std::io::Result<()> { + let mod_filepath = dst_dir.as_ref().join("mod.rs"); // Discover all submodules by iterating over the folder contents. let mut submodules = Vec::new(); - for entry in fs::read_dir(directory.as_ref())? { + for entry in fs::read_dir(dst_dir.as_ref())? 
{ let entry = entry?; let path = entry.path(); if path.is_file() { @@ -90,16 +76,8 @@ fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { submodules.sort(); - let contents = submodules.iter().map(|f| format!("pub mod {f};\n")); - let contents = std::iter::once( - "#![allow(clippy::pedantic, reason = \"generated by build.rs and tonic\")]\n".to_string(), - ) - .chain(std::iter::once( - "#![allow(clippy::large_enum_variant, reason = \"generated by build.rs and tonic\")]\n\n" - .to_string(), - )) - .chain(contents) - .collect::(); + let modules = submodules.iter().map(|f| format!("pub mod {f};\n")); + let contents = modules.into_iter().collect::(); fs::write(mod_filepath, contents) } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8e06d33699..aeec888328 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -9,6 +9,7 @@ use miden_protocol::account::{ AccountId, AccountStorageHeader, StorageMap, + StorageMapKey, StorageSlotHeader, StorageSlotName, StorageSlotType, @@ -223,7 +224,7 @@ impl TryFrom), + MapKeys(Vec), } impl @@ -426,7 +427,7 @@ pub enum StorageMapEntries { /// All storage map entries (key-value pairs) without proofs. /// Used when all entries are requested for small maps. - AllEntries(Vec<(Word, Word)>), + AllEntries(Vec<(StorageMapKey, Word)>), /// Specific entries with their SMT proofs for client-side verification. /// Used when specific keys are requested from the storage map. @@ -468,7 +469,10 @@ impl AccountStorageMapDetails { /// Creates storage map details from forest-queried entries. /// /// Returns `LimitExceeded` if too many entries. 
- pub fn from_forest_entries(slot_name: StorageSlotName, entries: Vec<(Word, Word)>) -> Self { + pub fn from_forest_entries( + slot_name: StorageSlotName, + entries: Vec<(StorageMapKey, Word)>, + ) -> Self { if entries.len() > Self::MAX_RETURN_ENTRIES { Self { slot_name, @@ -551,7 +555,8 @@ impl TryFrom let key = entry .key .ok_or(StorageMapEntry::missing_field(stringify!(key)))? - .try_into()?; + .try_into() + .map(StorageMapKey::new)?; let value = entry .value .ok_or(StorageMapEntry::missing_field(stringify!(value)))? diff --git a/crates/proto/src/domain/account/tests.rs b/crates/proto/src/domain/account/tests.rs index 695813d990..c25511d605 100644 --- a/crates/proto/src/domain/account/tests.rs +++ b/crates/proto/src/domain/account/tests.rs @@ -1,3 +1,5 @@ +use miden_protocol::account::StorageMapKey; + use super::*; fn word_from_u32(arr: [u32; 4]) -> Word { @@ -12,8 +14,11 @@ fn test_slot_name() -> StorageSlotName { fn account_storage_map_details_from_forest_entries() { let slot_name = test_slot_name(); let entries = vec![ - (word_from_u32([1, 2, 3, 4]), word_from_u32([5, 6, 7, 8])), - (word_from_u32([9, 10, 11, 12]), word_from_u32([13, 14, 15, 16])), + (StorageMapKey::new(word_from_u32([1, 2, 3, 4])), word_from_u32([5, 6, 7, 8])), + ( + StorageMapKey::new(word_from_u32([9, 10, 11, 12])), + word_from_u32([13, 14, 15, 16]), + ), ]; let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries.clone()); @@ -28,7 +33,7 @@ fn account_storage_map_details_from_forest_entries_limit_exceeded() { // Create more entries than MAX_RETURN_ENTRIES let entries: Vec<_> = (0..=AccountStorageMapDetails::MAX_RETURN_ENTRIES) .map(|i| { - let key = word_from_u32([i as u32, 0, 0, 0]); + let key = StorageMapKey::from_index(i as u32); let value = word_from_u32([0, 0, 0, i as u32]); (key, value) }) diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index aa94f306dd..112f84e50b 100644 --- a/crates/proto/src/domain/block.rs +++ 
b/crates/proto/src/domain/block.rs @@ -3,7 +3,14 @@ use std::ops::RangeInclusive; use miden_protocol::account::AccountId; use miden_protocol::block::nullifier_tree::NullifierWitness; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters}; +use miden_protocol::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + FeeParameters, + SignedBlock, +}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_protocol::note::{NoteId, NoteInclusionProof}; use miden_protocol::transaction::PartialBlockchain; @@ -115,6 +122,84 @@ impl TryFrom for BlockHeader { } } +// BLOCK BODY +// ================================================================================================ + +impl From<&BlockBody> for proto::blockchain::BlockBody { + fn from(body: &BlockBody) -> Self { + Self { block_body: body.to_bytes() } + } +} + +impl From for proto::blockchain::BlockBody { + fn from(body: BlockBody) -> Self { + (&body).into() + } +} + +impl TryFrom<&proto::blockchain::BlockBody> for BlockBody { + type Error = ConversionError; + + fn try_from(value: &proto::blockchain::BlockBody) -> Result { + value.try_into() + } +} + +impl TryFrom for BlockBody { + type Error = ConversionError; + fn try_from(value: proto::blockchain::BlockBody) -> Result { + BlockBody::read_from_bytes(&value.block_body) + .map_err(|source| ConversionError::deserialization_error("BlockBody", source)) + } +} + +// SIGNED BLOCK +// ================================================================================================ + +impl From<&SignedBlock> for proto::blockchain::SignedBlock { + fn from(block: &SignedBlock) -> Self { + Self { + header: Some(block.header().into()), + body: Some(block.body().into()), + signature: Some(block.signature().into()), + } + } +} + +impl From for proto::blockchain::SignedBlock { + fn from(block: SignedBlock) -> Self { + (&block).into() + } +} + +impl TryFrom<&proto::blockchain::SignedBlock> for SignedBlock { + 
type Error = ConversionError; + + fn try_from(value: &proto::blockchain::SignedBlock) -> Result { + value.try_into() + } +} + +impl TryFrom for SignedBlock { + type Error = ConversionError; + fn try_from(value: proto::blockchain::SignedBlock) -> Result { + let header = value + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + let body = value + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + let signature = value + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + Ok(SignedBlock::new_unchecked(header, body, signature)) + } +} + // BLOCK INPUTS // ================================================================================================ diff --git a/crates/proto/src/domain/digest.rs b/crates/proto/src/domain/digest.rs index 7be94e5304..08d8c3f9a1 100644 --- a/crates/proto/src/domain/digest.rs +++ b/crates/proto/src/domain/digest.rs @@ -1,6 +1,7 @@ use std::fmt::{Debug, Display, Formatter}; use hex::{FromHex, ToHex}; +use miden_protocol::account::StorageMapKey; use miden_protocol::note::NoteId; use miden_protocol::{Felt, StarkField, Word}; @@ -136,6 +137,18 @@ impl From<&Word> for proto::primitives::Digest { } } +impl From for proto::primitives::Digest { + fn from(value: StorageMapKey) -> Self { + Into::::into(value).into() + } +} + +impl From<&StorageMapKey> for proto::primitives::Digest { + fn from(value: &StorageMapKey) -> Self { + (*value).into() + } +} + impl From<&NoteId> for proto::primitives::Digest { fn from(value: &NoteId) -> Self { value.as_word().into() @@ -185,6 +198,14 @@ impl TryFrom for Word { } } +impl TryFrom for StorageMapKey { + type Error = ConversionError; + + fn try_from(value: proto::primitives::Digest) -> Result { + Ok(StorageMapKey::new(value.try_into()?)) + } +} + impl TryFrom<&proto::primitives::Digest> for [Felt; 4] { type Error = ConversionError; diff --git 
a/crates/proto/src/domain/mempool.rs b/crates/proto/src/domain/mempool.rs index 332cd67725..c9bf76bfc9 100644 --- a/crates/proto/src/domain/mempool.rs +++ b/crates/proto/src/domain/mempool.rs @@ -5,8 +5,8 @@ use miden_protocol::block::BlockHeader; use miden_protocol::note::Nullifier; use miden_protocol::transaction::TransactionId; use miden_protocol::utils::{Deserializable, Serializable}; +use miden_standards::note::AccountTargetNetworkNote; -use super::note::NetworkNote; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -15,7 +15,7 @@ pub enum MempoolEvent { TransactionAdded { id: TransactionId, nullifiers: Vec, - network_notes: Vec, + network_notes: Vec, account_delta: Option, }, BlockCommitted { diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 94fea5bebc..f92ac75179 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ Note, @@ -12,17 +11,41 @@ use miden_protocol::note::{ NoteScript, NoteTag, NoteType, - Nullifier, }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{MastForest, MastNodeId, Word}; -use miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; -use thiserror::Error; +use miden_standards::note::AccountTargetNetworkNote; -use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; +// NOTE TYPE +// ================================================================================================ + +impl From for proto::note::NoteType { + fn from(note_type: NoteType) -> Self { + match note_type { + NoteType::Public => proto::note::NoteType::Public, + NoteType::Private => proto::note::NoteType::Private, + } + } +} + +impl TryFrom for NoteType { + type Error = ConversionError; + + 
fn try_from(note_type: proto::note::NoteType) -> Result { + match note_type { + proto::note::NoteType::Public => Ok(NoteType::Public), + proto::note::NoteType::Private => Ok(NoteType::Private), + proto::note::NoteType::Unspecified => Err(ConversionError::EnumDiscriminantOutOfRange), + } + } +} + +// NOTE METADATA +// ================================================================================================ + impl TryFrom for NoteMetadata { type Error = ConversionError; @@ -31,7 +54,9 @@ impl TryFrom for NoteMetadata { .sender .ok_or_else(|| proto::note::NoteMetadata::missing_field(stringify!(sender)))? .try_into()?; - let note_type = NoteType::try_from(u64::from(value.note_type))?; + let note_type = proto::note::NoteType::try_from(value.note_type) + .map_err(|_| ConversionError::EnumDiscriminantOutOfRange)? + .try_into()?; let tag = NoteTag::new(value.tag); // Deserialize attachment if present @@ -42,7 +67,7 @@ impl TryFrom for NoteMetadata { .map_err(|err| ConversionError::deserialization_error("NoteAttachment", err))? 
}; - Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) + Ok(NoteMetadata::new(sender, note_type).with_tag(tag).with_attachment(attachment)) } } @@ -64,9 +89,9 @@ impl From for proto::note::Note { } } -impl From for proto::note::NetworkNote { - fn from(note: NetworkNote) -> Self { - let note = Note::from(note); +impl From for proto::note::NetworkNote { + fn from(note: AccountTargetNetworkNote) -> Self { + let note = note.into_note(); Self { metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), @@ -74,10 +99,26 @@ impl From for proto::note::NetworkNote { } } +impl TryFrom for AccountTargetNetworkNote { + type Error = ConversionError; + + fn try_from(value: proto::note::NetworkNote) -> Result { + let details = NoteDetails::read_from_bytes(&value.details) + .map_err(|err| ConversionError::deserialization_error("NoteDetails", err))?; + let (assets, recipient) = details.into_parts(); + let metadata: NoteMetadata = value + .metadata + .ok_or_else(|| proto::note::NetworkNote::missing_field(stringify!(metadata)))? + .try_into()?; + let note = Note::new(assets, metadata, recipient); + AccountTargetNetworkNote::new(note).map_err(ConversionError::NetworkNoteError) + } +} + impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); - let note_type = val.note_type() as u32; + let note_type = proto::note::NoteType::from(val.note_type()) as i32; let tag = val.tag().as_u32(); let attachment = val.attachment().to_bytes(); @@ -178,148 +219,6 @@ impl TryFrom for Note { } } -// NETWORK NOTE -// ================================================================================================ - -/// An enum that wraps around notes used in a network mode. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NetworkNote { - SingleTarget(SingleTargetNetworkNote), -} - -impl NetworkNote { - pub fn inner(&self) -> &Note { - match self { - NetworkNote::SingleTarget(note) => note.inner(), - } - } - - pub fn metadata(&self) -> &NoteMetadata { - self.inner().metadata() - } - - pub fn nullifier(&self) -> Nullifier { - self.inner().nullifier() - } - - pub fn id(&self) -> NoteId { - self.inner().id() - } -} - -impl From for Note { - fn from(value: NetworkNote) -> Self { - match value { - NetworkNote::SingleTarget(note) => note.into(), - } - } -} - -impl TryFrom for NetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - SingleTargetNetworkNote::try_from(note).map(NetworkNote::SingleTarget) - } -} - -impl TryFrom for NetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - -// SINGLE TARGET NETWORK NOTE -// ================================================================================================ - -/// A newtype that wraps around notes targeting a single network account. -/// -/// A note is considered a single-target network note if its attachment -/// is a valid `NetworkAccountTarget`. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct SingleTargetNetworkNote { - note: Note, - account_target: NetworkAccountTarget, -} - -impl SingleTargetNetworkNote { - pub fn inner(&self) -> &Note { - &self.note - } - - pub fn metadata(&self) -> &NoteMetadata { - self.inner().metadata() - } - - pub fn nullifier(&self) -> Nullifier { - self.inner().nullifier() - } - - pub fn id(&self) -> NoteId { - self.inner().id() - } - - /// The network account ID that this note targets. 
- pub fn account_id(&self) -> NetworkAccountId { - self.account_target.target_id().try_into().expect("always a network account ID") - } - - pub fn can_be_consumed(&self, block_num: BlockNumber) -> Option { - self.account_target.execution_hint().can_be_consumed(block_num) - } -} - -impl From for Note { - fn from(value: SingleTargetNetworkNote) -> Self { - value.note - } -} - -impl TryFrom for SingleTargetNetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - // Single-target network notes are identified by having a NetworkAccountTarget attachment - let attachment = note.metadata().attachment(); - let account_target = NetworkAccountTarget::try_from(attachment) - .map_err(NetworkNoteError::InvalidAttachment)?; - Ok(Self { note, account_target }) - } -} - -impl TryFrom for SingleTargetNetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - -/// Helper function to deduplicate implementations `TryFrom`. -fn from_proto(proto_note: proto::note::NetworkNote) -> Result -where - T: TryFrom, - T::Error: Into, -{ - let details = NoteDetails::read_from_bytes(&proto_note.details) - .map_err(|err| ConversionError::deserialization_error("NoteDetails", err))?; - let (assets, recipient) = details.into_parts(); - let metadata: NoteMetadata = proto_note - .metadata - .ok_or_else(|| proto::note::NetworkNote::missing_field(stringify!(metadata)))? 
- .try_into()?; - let note = Note::new(assets, metadata, recipient); - T::try_from(note).map_err(Into::into) -} - -#[derive(Debug, Error)] -pub enum NetworkNoteError { - #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] - InvalidAttachment(#[source] NetworkAccountTargetError), -} - // NOTE SCRIPT // ================================================================================================ diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index d2fc936167..04493e6960 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -6,10 +6,9 @@ pub use miden_node_grpc_error_macro::GrpcError; use miden_protocol::crypto::merkle::smt::{SmtLeafError, SmtProofError}; use miden_protocol::errors::{AccountError, AssetError, FeeError, NoteError, StorageSlotNameError}; use miden_protocol::utils::DeserializationError; +use miden_standards::note::NetworkAccountTargetError; use thiserror::Error; -use crate::domain::note::NetworkNoteError; - #[cfg(test)] mod test_macro; @@ -28,7 +27,7 @@ pub enum ConversionError { #[error("note error")] NoteError(#[from] NoteError), #[error("network note error")] - NetworkNoteError(#[from] NetworkNoteError), + NetworkNoteError(#[source] NetworkAccountTargetError), #[error("SMT leaf error")] SmtLeafError(#[from] SmtLeafError), #[error("SMT proof error")] diff --git a/crates/proto/src/generated/account.rs b/crates/proto/src/generated/account.rs deleted file mode 100644 index 6ff6135626..0000000000 --- a/crates/proto/src/generated/account.rs +++ /dev/null @@ -1,99 +0,0 @@ -// This file is @generated by prost-build. -/// Uniquely identifies a specific account. -/// -/// A Miden account ID is a 120-bit value derived from the commitments to account code and storage, -/// and a random user-provided seed. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -#[prost(skip_debug)] -pub struct AccountId { - /// 15 bytes (120 bits) encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::account::account_id::AccountId\]. - #[prost(bytes = "vec", tag = "1")] - pub id: ::prost::alloc::vec::Vec, -} -/// The state of an account at a specific block height. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountSummary { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The current account commitment or zero if the account does not exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - /// Block number at which the summary was made. - #[prost(uint32, tag = "3")] - pub block_num: u32, -} -/// Represents the storage header of an account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageHeader { - /// Storage slots with their types and data. - #[prost(message, repeated, tag = "1")] - pub slots: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `AccountStorageHeader`. -pub mod account_storage_header { - /// A single storage slot in the account storage header. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageSlot { - /// The name of the storage slot. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - /// The type of the storage slot. - #[prost(uint32, tag = "2")] - pub slot_type: u32, - /// The data (Word) for this storage slot. - /// For value slots (slot_type=0), this is the actual value stored in the slot. - /// For map slots (slot_type=1), this is the root of the storage map. - #[prost(message, optional, tag = "3")] - pub commitment: ::core::option::Option, - } -} -/// An account details. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountDetails { - /// Account summary. 
- #[prost(message, optional, tag = "1")] - pub summary: ::core::option::Option, - /// Account details encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::account::Account\]. - #[prost(bytes = "vec", optional, tag = "2")] - pub details: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// An account header. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountHeader { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Vault root hash. - #[prost(message, optional, tag = "2")] - pub vault_root: ::core::option::Option, - /// Storage root hash. - #[prost(message, optional, tag = "3")] - pub storage_commitment: ::core::option::Option, - /// Code root hash. - #[prost(message, optional, tag = "4")] - pub code_commitment: ::core::option::Option, - /// Account nonce. - #[prost(uint64, tag = "5")] - pub nonce: u64, -} -/// An account witness. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountWitness { - /// Account ID for which this proof is requested. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The account ID within the proof, which may be different from the above account ID. - /// This can happen when the requested account ID's prefix matches the prefix of an existing - /// account ID in the tree. Then the witness will prove inclusion of this witness ID in the tree. - #[prost(message, optional, tag = "2")] - pub witness_id: ::core::option::Option, - /// The state commitment whose inclusion the witness proves. - #[prost(message, optional, tag = "3")] - pub commitment: ::core::option::Option, - /// The merkle path of the state commitment in the account tree. 
- #[prost(message, optional, tag = "4")] - pub path: ::core::option::Option, -} diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs deleted file mode 100644 index 9c95e6a75c..0000000000 --- a/crates/proto/src/generated/block_producer.rs +++ /dev/null @@ -1,657 +0,0 @@ -// This file is @generated by prost-build. -/// Request to subscribe to mempool events. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MempoolSubscriptionRequest { - /// The caller's current chain height. - /// - /// Request will be rejected if this does not match the mempool's current view. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, -} -/// Event from the mempool. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MempoolEvent { - #[prost(oneof = "mempool_event::Event", tags = "1, 2, 3")] - pub event: ::core::option::Option, -} -/// Nested message and enum types in `MempoolEvent`. -pub mod mempool_event { - /// A block was committed. - /// - /// This event is sent when a block is committed to the chain. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct BlockCommitted { - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec< - super::super::transaction::TransactionId, - >, - } - /// A transaction was added to the mempool. - /// - /// This event is sent when a transaction is added to the mempool. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TransactionAdded { - /// The ID of the transaction. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Nullifiers consumed by the transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Network notes created by the transaction. 
- #[prost(message, repeated, tag = "3")] - pub network_notes: ::prost::alloc::vec::Vec, - /// Changes to a network account, if any. This includes creation of new network accounts. - /// - /// The account delta is encoded using \[winter_utils::Serializable\] implementation - /// for \[miden_protocol::account::delta::AccountDelta\]. - #[prost(bytes = "vec", optional, tag = "4")] - pub network_account_delta: ::core::option::Option<::prost::alloc::vec::Vec>, - } - /// A set of transactions was reverted and dropped from the mempool. - /// - /// This event is sent when a set of transactions are reverted and dropped from the mempool. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TransactionsReverted { - #[prost(message, repeated, tag = "1")] - pub reverted: ::prost::alloc::vec::Vec, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Event { - #[prost(message, tag = "1")] - TransactionAdded(TransactionAdded), - #[prost(message, tag = "2")] - BlockCommitted(BlockCommitted), - #[prost(message, tag = "3")] - TransactionsReverted(TransactionsReverted), - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/Status", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("block_producer.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Submits proven transaction to the Miden network. Returns the node's current block height. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("block_producer.Api", "SubmitProvenTransaction"), - ); - self.inner.unary(req, path, codec).await - } - /// Submits a proven batch to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. 
- pub async fn submit_proven_batch( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/SubmitProvenBatch", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("block_producer.Api", "SubmitProvenBatch")); - self.inner.unary(req, path, codec).await - } - /// Subscribe to mempool events. - /// - /// The request will be rejected if the caller and the mempool disagree on the current chain tip. - /// This prevents potential desync issues. The caller can resolve this by resync'ing its chain state. - /// - /// The event stream will contain all events after the chain tip. This includes all currently inflight - /// events that have not yet been committed to the chain. - /// - /// Currently only a single active subscription is supported. Subscription requests will cancel the active - /// subscription, if any. - pub async fn mempool_subscription( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer.Api/MempoolSubscription", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("block_producer.Api", "MempoolSubscription")); - self.inner.server_streaming(req, path, codec).await - } - } -} -/// Generated server implementations. 
-pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits proven transaction to the Miden network. Returns the node's current block height. - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits a proven batch to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Server streaming response type for the MempoolSubscription method. - type MempoolSubscriptionStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > - + std::marker::Send - + 'static; - /// Subscribe to mempool events. - /// - /// The request will be rejected if the caller and the mempool disagree on the current chain tip. - /// This prevents potential desync issues. The caller can resolve this by resync'ing its chain state. - /// - /// The event stream will contain all events after the chain tip. 
This includes all currently inflight - /// events that have not yet been committed to the chain. - /// - /// Currently only a single active subscription is supported. Subscription requests will cancel the active - /// subscription, if any. - async fn mempool_subscription( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/block_producer.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::super::rpc::BlockProducerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > 
tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/SubmitProvenBatch" => { - #[allow(non_camel_case_types)] - struct SubmitProvenBatchSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransactionBatch, - > for SubmitProvenBatchSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_batch(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let 
send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenBatchSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer.Api/MempoolSubscription" => { - #[allow(non_camel_case_types)] - struct MempoolSubscriptionSvc(pub Arc); - impl< - T: Api, - > tonic::server::ServerStreamingService< - super::MempoolSubscriptionRequest, - > for MempoolSubscriptionSvc { - type Response = super::MempoolEvent; - type ResponseStream = T::MempoolSubscriptionStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::mempool_subscription(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = MempoolSubscriptionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = 
grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "block_producer.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs deleted file mode 100644 index 69bbe2e28e..0000000000 --- a/crates/proto/src/generated/blockchain.rs +++ /dev/null @@ -1,113 +0,0 @@ -// This file is @generated by prost-build. -/// Represents a block. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Block { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", tag = "1")] - pub block: ::prost::alloc::vec::Vec, -} -/// Represents a proposed block. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProposedBlock { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::ProposedBlock\]. - #[prost(bytes = "vec", tag = "1")] - pub proposed_block: ::prost::alloc::vec::Vec, -} -/// Represents a block or nothing. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeBlock { - /// The requested block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", optional, tag = "1")] - pub block: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// Represents a block number. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockNumber { - /// The block number of the target block. - #[prost(fixed32, tag = "1")] - pub block_num: u32, -} -/// Represents a block number or nothing. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeBlockNumber { - /// The block number of the target block. - #[prost(fixed32, optional, tag = "1")] - pub block_num: ::core::option::Option, -} -/// Represents a block header. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeader { - /// Specifies the version of the protocol. - #[prost(uint32, tag = "1")] - pub version: u32, - /// The commitment of the previous blocks header. - #[prost(message, optional, tag = "2")] - pub prev_block_commitment: ::core::option::Option, - /// A unique sequential number of the current block. - #[prost(fixed32, tag = "3")] - pub block_num: u32, - /// A commitment to an MMR of the entire chain where each block is a leaf. - #[prost(message, optional, tag = "4")] - pub chain_commitment: ::core::option::Option, - /// A commitment to account database. - #[prost(message, optional, tag = "5")] - pub account_root: ::core::option::Option, - /// A commitment to the nullifier database. - #[prost(message, optional, tag = "6")] - pub nullifier_root: ::core::option::Option, - /// A commitment to all notes created in the current block. - #[prost(message, optional, tag = "7")] - pub note_root: ::core::option::Option, - /// A commitment to a set of IDs of transactions which affected accounts in this block. 
- #[prost(message, optional, tag = "8")] - pub tx_commitment: ::core::option::Option, - /// The validator's ECDSA public key. - #[prost(message, optional, tag = "9")] - pub validator_key: ::core::option::Option, - /// A commitment to all transaction kernels supported by this block. - #[prost(message, optional, tag = "10")] - pub tx_kernel_commitment: ::core::option::Option, - /// Fee parameters for block processing. - #[prost(message, optional, tag = "11")] - pub fee_parameters: ::core::option::Option, - /// The time when the block was created. - #[prost(fixed32, tag = "12")] - pub timestamp: u32, -} -/// Validator ECDSA public key. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ValidatorPublicKey { - /// Signature encoded using \[winter_utils::Serializable\] implementation for - /// \[crypto::dsa::ecdsa_k256_keccak::PublicKey\]. - #[prost(bytes = "vec", tag = "1")] - pub validator_key: ::prost::alloc::vec::Vec, -} -/// Block ECDSA Signature. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockSignature { - /// Signature encoded using \[winter_utils::Serializable\] implementation for - /// \[crypto::dsa::ecdsa_k256_keccak::Signature\]. - #[prost(bytes = "vec", tag = "1")] - pub signature: ::prost::alloc::vec::Vec, -} -/// Definition of the fee parameters. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct FeeParameters { - /// The faucet account ID which is used for native fee assets. - #[prost(message, optional, tag = "1")] - pub native_asset_id: ::core::option::Option, - /// The base fee (in base units) capturing the cost for the verification of a transaction. - #[prost(fixed32, tag = "2")] - pub verification_base_fee: u32, -} -/// Represents a block body. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockBody { - /// Block body data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::BlockBody\]. 
- #[prost(bytes = "vec", tag = "1")] - pub block_body: ::prost::alloc::vec::Vec, -} diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index 61e3a53790..63dc1dfa2c 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -1,13 +1,8 @@ -#![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] -#![allow(clippy::large_enum_variant, reason = "generated by build.rs and tonic")] +#![expect( + clippy::pedantic, + clippy::large_enum_variant, + clippy::allow_attributes, + reason = "generated by build.rs and tonic" +)] -pub mod account; -pub mod block_producer; -pub mod blockchain; -pub mod note; -pub mod primitives; -pub mod remote_prover; -pub mod rpc; -pub mod store; -pub mod transaction; -pub mod validator; +include!(concat!(env!("OUT_DIR"), "/generated/mod.rs")); diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs deleted file mode 100644 index 83d56aeb6b..0000000000 --- a/crates/proto/src/generated/note.rs +++ /dev/null @@ -1,130 +0,0 @@ -// This file is @generated by prost-build. -/// Represents a note's ID. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteId { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// List of note IDs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteIdList { - /// List of note IDs to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub ids: ::prost::alloc::vec::Vec, -} -/// Represents a note's metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteMetadata { - /// The account which sent the note. - #[prost(message, optional, tag = "1")] - pub sender: ::core::option::Option, - /// The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). 
- #[prost(uint32, tag = "2")] - pub note_type: u32, - /// A value which can be used by the recipient(s) to identify notes intended for them. - /// - /// See `miden_protocol::note::note_tag` for more info. - #[prost(fixed32, tag = "3")] - pub tag: u32, - /// Serialized note attachment - /// - /// See `miden_protocol::note::NoteAttachment` for more info. - #[prost(bytes = "vec", tag = "4")] - pub attachment: ::prost::alloc::vec::Vec, -} -/// Represents a note. -/// -/// The note is composed of the note metadata and its serialized details. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Note { - /// The note's metadata. - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - /// Serialized note details (empty for private notes). - #[prost(bytes = "vec", optional, tag = "2")] - pub details: ::core::option::Option<::prost::alloc::vec::Vec>, -} -/// Represents a network note. -/// -/// Network notes are a subtype of public notes, and as such, their details are always publicly -/// known. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NetworkNote { - /// The note's metadata. - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - /// Serialized note details (i.e., assets and recipient). - #[prost(bytes = "vec", tag = "2")] - pub details: ::prost::alloc::vec::Vec, -} -/// Represents a committed note. -/// -/// A committed note is a note that has been included in a block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CommittedNote { - /// Either private, public, or network note. - #[prost(message, optional, tag = "1")] - pub note: ::core::option::Option, - /// The data needed to prove that the note is present in the chain. - #[prost(message, optional, tag = "2")] - pub inclusion_proof: ::core::option::Option, -} -/// Represents the result of getting committed notes. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CommittedNoteList { - /// List of committed notes. - #[prost(message, repeated, tag = "1")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Represents a proof of note's inclusion in a block. -/// -/// Does not include proof of the block's inclusion in the chain. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteInclusionInBlockProof { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub note_id: ::core::option::Option, - /// The block number in which the note was created. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - /// The index of the note in the block. - #[prost(uint32, tag = "3")] - pub note_index_in_block: u32, - /// The note's inclusion proof in the block. - #[prost(message, optional, tag = "4")] - pub inclusion_path: ::core::option::Option, -} -/// Represents proof of a note inclusion in the block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NoteSyncRecord { - /// A unique identifier of the note which is a 32-byte commitment to the underlying note data. - #[prost(message, optional, tag = "1")] - pub note_id: ::core::option::Option, - /// The index of the note in the block. - #[prost(uint32, tag = "2")] - pub note_index_in_block: u32, - /// The note's metadata. - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, - /// The note's inclusion proof in the block. - #[prost(message, optional, tag = "4")] - pub inclusion_path: ::core::option::Option, -} -/// Represents a note root. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteRoot { - /// The root of the note. - #[prost(message, optional, tag = "1")] - pub root: ::core::option::Option, -} -/// Represents a note script. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct NoteScript { - /// Entrypoint of the script. 
- #[prost(uint32, tag = "1")] - pub entrypoint: u32, - /// Mast of the script. - #[prost(bytes = "vec", tag = "2")] - pub mast: ::prost::alloc::vec::Vec, -} diff --git a/crates/proto/src/generated/primitives.rs b/crates/proto/src/generated/primitives.rs deleted file mode 100644 index ea7f5a1a17..0000000000 --- a/crates/proto/src/generated/primitives.rs +++ /dev/null @@ -1,98 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Asset { - /// Asset represented as a word. - #[prost(message, optional, tag = "1")] - pub asset: ::core::option::Option, -} -/// Represents a single SMT leaf entry. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SmtLeafEntry { - /// The key of the entry. - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option, - /// The value of the entry. - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option, -} -/// Multiple leaf entries when hash collisions occur at the same leaf position. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtLeafEntryList { - /// The list of entries at this leaf. - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, -} -/// A leaf in an SMT, sitting at depth 64. A leaf can contain 0, 1 or multiple leaf entries. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtLeaf { - #[prost(oneof = "smt_leaf::Leaf", tags = "1, 2, 3")] - pub leaf: ::core::option::Option, -} -/// Nested message and enum types in `SmtLeaf`. -pub mod smt_leaf { - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Leaf { - /// An empty leaf index. - #[prost(uint64, tag = "1")] - EmptyLeafIndex(u64), - /// A single leaf entry. - #[prost(message, tag = "2")] - Single(super::SmtLeafEntry), - /// Multiple leaf entries. - #[prost(message, tag = "3")] - Multiple(super::SmtLeafEntryList), - } -} -/// The opening of a leaf in an SMT. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SmtOpening { - /// The Merkle path to the leaf. - #[prost(message, optional, tag = "1")] - pub path: ::core::option::Option, - /// The leaf itself. - #[prost(message, optional, tag = "2")] - pub leaf: ::core::option::Option, -} -/// A different representation of a Merkle path designed for memory efficiency. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SparseMerklePath { - /// A bitmask representing empty nodes. - /// - /// The set bit corresponds to the depth of an empty node. The least significant bit (bit 0) - /// describes depth 1 node (root's children). The `bit index + 1` is equal to node's depth. - #[prost(fixed64, tag = "1")] - pub empty_nodes_mask: u64, - /// The non-empty nodes, stored in depth-order, but not contiguous across depth. - #[prost(message, repeated, tag = "2")] - pub siblings: ::prost::alloc::vec::Vec, -} -/// Represents an MMR delta. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MmrDelta { - /// The number of leaf nodes in the MMR. - #[prost(uint64, tag = "1")] - pub forest: u64, - /// New and changed MMR peaks. - #[prost(message, repeated, tag = "2")] - pub data: ::prost::alloc::vec::Vec, -} -/// Represents a Merkle path. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MerklePath { - /// List of sibling node hashes, in order from the root to the leaf. - #[prost(message, repeated, tag = "1")] - pub siblings: ::prost::alloc::vec::Vec, -} -/// A hash digest, the result of a hash function. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -#[prost(skip_debug)] -pub struct Digest { - #[prost(fixed64, tag = "1")] - pub d0: u64, - #[prost(fixed64, tag = "2")] - pub d1: u64, - #[prost(fixed64, tag = "3")] - pub d2: u64, - #[prost(fixed64, tag = "4")] - pub d3: u64, -} diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs deleted file mode 100644 index b504804c3e..0000000000 --- a/crates/proto/src/generated/remote_prover.rs +++ /dev/null @@ -1,1003 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. - /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. 
- #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. 
-pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Generates a proof for the requested payload. 
- async fn prove( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.Api/Prove" => { - #[allow(non_camel_case_types)] - struct ProveSvc(pub Arc); - impl tonic::server::UnaryService - for ProveSvc { - type Response = super::Proof; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::prove(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ProveSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - 
(tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod proxy_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ProxyStatusApiServer. - #[async_trait] - pub trait ProxyStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the proxy. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct ProxyStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ProxyStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ProxyStatusApiServer - where - T: ProxyStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.ProxyStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::ProxyStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers 
- .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ProxyStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.ProxyStatusApi"; - impl tonic::server::NamedService for ProxyStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod worker_status_api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with WorkerStatusApiServer. - #[async_trait] - pub trait WorkerStatusApi: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status of the worker. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct WorkerStatusApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl WorkerStatusApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for WorkerStatusApiServer - where - T: WorkerStatusApi, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/remote_prover.WorkerStatusApi/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> - for StatusSvc { - type Response = super::WorkerStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - 
headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for WorkerStatusApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "remote_prover.WorkerStatusApi"; - impl tonic::server::NamedService for WorkerStatusApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs deleted file mode 100644 index 798a1d18e8..0000000000 --- a/crates/proto/src/generated/rpc.rs +++ /dev/null @@ -1,2122 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct RpcStatus { - /// The rpc component's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The genesis commitment. - #[prost(message, optional, tag = "2")] - pub genesis_commitment: ::core::option::Option, - /// The store status. - #[prost(message, optional, tag = "3")] - pub store: ::core::option::Option, - /// The block producer status. - #[prost(message, optional, tag = "4")] - pub block_producer: ::core::option::Option, -} -/// Represents the status of the block producer. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockProducerStatus { - /// The block producer's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The block producer's status. 
- #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// The block producer's current view of the chain tip height. - /// - /// This is the height of the latest block that the block producer considers - /// to be part of the canonical chain. - #[prost(fixed32, tag = "4")] - pub chain_tip: u32, - /// Statistics about the mempool. - #[prost(message, optional, tag = "3")] - pub mempool_stats: ::core::option::Option, -} -/// Statistics about the mempool. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MempoolStats { - /// Number of transactions currently in the mempool waiting to be batched. - #[prost(uint64, tag = "1")] - pub unbatched_transactions: u64, - /// Number of batches currently being proven. - #[prost(uint64, tag = "2")] - pub proposed_batches: u64, - /// Number of proven batches waiting for block inclusion. - #[prost(uint64, tag = "3")] - pub proven_batches: u64, -} -/// Represents the status of the store. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StoreStatus { - /// The store's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The store's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "3")] - pub chain_tip: u32, -} -/// Returns the block header corresponding to the requested block number, as well as the merkle -/// path and current forest which validate the block's inclusion in the chain. -/// -/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeaderByNumberRequest { - /// The target block height, defaults to latest if not provided. - #[prost(uint32, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Whether or not to return authentication data for the block header. 
- #[prost(bool, optional, tag = "2")] - pub include_mmr_proof: ::core::option::Option, -} -/// Represents the result of getting a block header by block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockHeaderByNumberResponse { - /// The requested block header. - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - #[prost(message, optional, tag = "2")] - pub mmr_path: ::core::option::Option, - /// Current chain length. - #[prost(fixed32, optional, tag = "3")] - pub chain_length: ::core::option::Option, -} -/// Represents a note script or nothing. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeNoteScript { - /// The script for a note by its root. - #[prost(message, optional, tag = "1")] - pub script: ::core::option::Option, -} -/// Defines the request for account details. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountRequest { - /// ID of the account for which we want to get data - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Optional block height at which to return the proof. - /// - /// Defaults to current chain tip if unspecified. - #[prost(message, optional, tag = "2")] - pub block_num: ::core::option::Option, - /// Request for additional account details; valid only for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountRequest`. -pub mod account_request { - /// Request the details for a public account. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetailRequest { - /// Last known code commitment to the requester. The response will include account code - /// only if its commitment is different from this value. - /// - /// If the field is ommiteed, the response will not include the account code. 
- #[prost(message, optional, tag = "1")] - pub code_commitment: ::core::option::Option, - /// Last known asset vault commitment to the requester. The response will include asset vault data - /// only if its commitment is different from this value. If the value is not present in the - /// request, the response will not contain one either. - /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested - /// separately, which is signaled in the response message with dedicated flag. - #[prost(message, optional, tag = "2")] - pub asset_vault_commitment: ::core::option::Option< - super::super::primitives::Digest, - >, - /// Additional request per storage map. - #[prost(message, repeated, tag = "3")] - pub storage_maps: ::prost::alloc::vec::Vec< - account_detail_request::StorageMapDetailRequest, - >, - } - /// Nested message and enum types in `AccountDetailRequest`. - pub mod account_detail_request { - /// Represents a storage slot index and the associated map keys. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapDetailRequest { - /// Storage slot name. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] - pub slot_data: ::core::option::Option, - } - /// Nested message and enum types in `StorageMapDetailRequest`. - pub mod storage_map_detail_request { - /// Indirection required for use in `oneof {..}` block. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapKeys { - /// A list of map keys associated with this storage slot. - #[prost(message, repeated, tag = "1")] - pub map_keys: ::prost::alloc::vec::Vec< - super::super::super::super::primitives::Digest, - >, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SlotData { - /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - /// the response will not contain them but must be requested separately. 
- #[prost(bool, tag = "2")] - AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_name`. - #[prost(message, tag = "3")] - MapKeys(MapKeys), - } - } - } -} -/// Represents the result of getting account proof. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountResponse { - /// The block number at which the account witness was created and the account details were observed. - #[prost(message, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Account ID, current state commitment, and SMT path. - #[prost(message, optional, tag = "2")] - pub witness: ::core::option::Option, - /// Additional details for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountResponse`. -pub mod account_response { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetails { - /// Account header. - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Account storage data - #[prost(message, optional, tag = "2")] - pub storage_details: ::core::option::Option, - /// Account code; empty if code commitments matched or none was requested. - #[prost(bytes = "vec", optional, tag = "3")] - pub code: ::core::option::Option<::prost::alloc::vec::Vec>, - /// Account asset vault data; empty if vault commitments matched or the requester - /// omitted it in the request. - #[prost(message, optional, tag = "4")] - pub vault_details: ::core::option::Option, - } -} -/// Account vault details for AccountResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountVaultDetails { - /// A flag that is set to true if the account contains too many assets. 
This indicates - /// to the user that `SyncAccountVault` endpoint should be used to retrieve the - /// account's assets - #[prost(bool, tag = "1")] - pub too_many_assets: bool, - /// When too_many_assets == false, this will contain the list of assets in the - /// account's vault - #[prost(message, repeated, tag = "2")] - pub assets: ::prost::alloc::vec::Vec, -} -/// Account storage details for AccountResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageDetails { - /// Account storage header (storage slot info for up to 256 slots) - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Additional data for the requested storage maps - #[prost(message, repeated, tag = "2")] - pub map_details: ::prost::alloc::vec::Vec< - account_storage_details::AccountStorageMapDetails, - >, -} -/// Nested message and enum types in `AccountStorageDetails`. -pub mod account_storage_details { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountStorageMapDetails { - /// Storage slot name. - #[prost(string, tag = "1")] - pub slot_name: ::prost::alloc::string::String, - /// True when the number of entries exceeds the response limit. - /// When set, clients should use the `SyncAccountStorageMaps` endpoint. - #[prost(bool, tag = "2")] - pub too_many_entries: bool, - /// The map entries (with or without proofs). Empty when too_many_entries is true. - #[prost(oneof = "account_storage_map_details::Entries", tags = "3, 4")] - pub entries: ::core::option::Option, - } - /// Nested message and enum types in `AccountStorageMapDetails`. - pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries including their proofs. - /// Used when specific keys are requested to enable client-side verification. 
- #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntriesWithProofs { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec< - map_entries_with_proofs::StorageMapEntryWithProof, - >, - } - /// Nested message and enum types in `MapEntriesWithProofs`. - pub mod map_entries_with_proofs { - /// Definition of individual storage entries including a proof. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapEntryWithProof { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "3")] - pub proof: ::core::option::Option< - super::super::super::super::primitives::SmtOpening, - >, - } - } - /// Wrapper for repeated storage map entries (without proofs). - /// Used when all entries are requested for small maps. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AllMapEntries { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - } - /// Nested message and enum types in `AllMapEntries`. - pub mod all_map_entries { - /// Definition of individual storage entries. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageMapEntry { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - } - } - /// The map entries (with or without proofs). Empty when too_many_entries is true. - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Entries { - /// All storage entries without proofs (for small maps or full requests). 
- #[prost(message, tag = "3")] - AllEntries(AllMapEntries), - /// Specific entries with their SMT proofs (for partial requests). - #[prost(message, tag = "4")] - EntriesWithProofs(MapEntriesWithProofs), - } - } -} -/// List of nullifiers to return proofs for. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NullifierList { - /// List of nullifiers to return proofs for. - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of checking nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the same position. - #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNullifiersRequest { - /// Block number from which the nullifiers are requested (inclusive). - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Number of bits used for nullifier prefix. Currently the only supported value is 16. - #[prost(uint32, tag = "2")] - pub prefix_len: u32, - /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal - /// to `prefix_len`. - #[prost(uint32, repeated, tag = "3")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNullifiersResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of nullifiers matching the prefixes specified in the request. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SyncNullifiersResponse`. 
-pub mod sync_nullifiers_response { - /// Represents a single nullifier update. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierUpdate { - /// Nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// Block number. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Account vault synchronization request. -/// -/// Allows requesters to sync asset values for specific public accounts within a block range. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountVaultRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync asset vault. - #[prost(message, optional, tag = "2")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountVaultResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of asset updates for the account. - /// - /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` - /// is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountVaultUpdate { - /// Vault key associated with the asset. - #[prost(message, optional, tag = "1")] - pub vault_key: ::core::option::Option, - /// Asset value related to the vault key. - /// If not present, the asset was removed from the vault. - #[prost(message, optional, tag = "2")] - pub asset: ::core::option::Option, - /// Block number at which the above asset was updated in the account vault. 
- #[prost(fixed32, tag = "3")] - pub block_num: u32, -} -/// Note synchronization request. -/// -/// Specifies note tags that requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNotesRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "2")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing notes request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNotesResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - /// - /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of - /// an MMR of forest `chain_tip` with this path. - #[prost(message, optional, tag = "3")] - pub mmr_path: ::core::option::Option, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "4")] - pub notes: ::prost::alloc::vec::Vec, -} -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. 
The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. - /// - /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing state request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - #[prost(message, optional, tag = "3")] - pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. - #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. 
- #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Storage map synchronization request. -/// -/// Allows requesters to sync storage map values for specific public accounts within a block range, -/// with support for cursor-based pagination to handle large storage maps. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountStorageMapsRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync storage maps. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountStorageMapsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of storage map updates. - /// - /// Multiple updates can be returned for a single slot index and key combination, and the one - /// with a higher `block_num` is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -/// Represents a single storage map update. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapUpdate { - /// Block number in which the slot was updated. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Storage slot name. - #[prost(string, tag = "2")] - pub slot_name: ::prost::alloc::string::String, - /// The storage map key. - #[prost(message, optional, tag = "3")] - pub key: ::core::option::Option, - /// The storage map value. - #[prost(message, optional, tag = "4")] - pub value: ::core::option::Option, -} -/// Represents a block range. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockRange { - /// Block number from which to start (inclusive). - #[prost(fixed32, tag = "1")] - pub block_from: u32, - /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. - #[prost(fixed32, optional, tag = "2")] - pub block_to: ::core::option::Option, -} -/// Represents pagination information for chunked responses. -/// -/// Pagination is done using block numbers as the axis, allowing requesters to request -/// data in chunks by specifying block ranges and continuing from where the previous -/// response left off. -/// -/// To request the next chunk, the requester should use `block_num + 1` from the previous response -/// as the `block_from` for the next request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct PaginationInfo { - /// Current chain tip - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// The block number of the last check included in this response. - /// - /// For chunked responses, this may be less than `request.block_range.block_to`. - /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request - /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Transactions synchronization request. -/// -/// Allows requesters to sync transactions for specific accounts within a block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Accounts to sync transactions for. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing transactions request. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of transaction records. - #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec, -} -/// Represents a transaction record. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionRecord { - /// Block number in which the transaction was included. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// A transaction header. - #[prost(message, optional, tag = "2")] - pub header: ::core::option::Option, -} -/// Represents the query parameter limits for RPC endpoints. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RpcLimits { - /// Maps RPC endpoint names to their parameter limits. - /// Key: endpoint name (e.g., "CheckNullifiers", "SyncState") - /// Value: map of parameter names to their limit values - #[prost(map = "string, message", tag = "1")] - pub endpoints: ::std::collections::HashMap< - ::prost::alloc::string::String, - EndpointLimits, - >, -} -/// Represents the parameter limits for a single endpoint. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EndpointLimits { - /// Maps parameter names to their limit values. - /// Key: parameter name (e.g., "nullifier", "account_id") - /// Value: limit value - #[prost(map = "string, uint32", tag = "1")] - pub parameters: ::std::collections::HashMap<::prost::alloc::string::String, u32>, -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// RPC API for the RPC component - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info of the node. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - /// - /// Verify proofs against the nullifier tree root in the latest block header. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/CheckNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details of the specified account. 
- pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccount"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. - pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetBlockByNumber"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of notes matching the provided note IDs. - pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetNotesById"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Submits proven transaction to the Miden network. 
Returns the node's current block height. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "SubmitProvenTransaction")); - self.inner.unary(req, path, codec).await - } - /// Submits a proven batch of transactions to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - pub async fn submit_proven_batch( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SubmitProvenBatch", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SubmitProvenBatch")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
- /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. 
- pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. 
- pub async fn sync_state( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. - pub async fn sync_account_storage_maps( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/SyncAccountStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. 
- pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - pub async fn get_limits( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info of the node. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - /// - /// Verify proofs against the nullifier tree root in the latest block header. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details of the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Submits proven transaction to the Miden network. Returns the node's current block height. 
- async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits a proven batch of transactions to the Miden network. - /// - /// The batch may include transactions which were are: - /// - /// * already in the mempool i.e. previously successfully submitted - /// * will be submitted to the mempool in the future - /// * won't be submitted to the mempool at all - /// - /// All transactions in the batch but not in the mempool must build on the current mempool - /// state following normal transaction submission rules. - /// - /// Returns the node's current block height. - async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. 
- async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - async fn sync_state( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. - async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. 
- async fn get_limits( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - } - /// RPC API for the RPC component - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/rpc.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::RpcStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = 
super::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountSvc { - type Response = super::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - 
.apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetBlockHeaderByNumberSvc { - type Response = super::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = 
Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - 
::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SubmitProvenBatch" => { - #[allow(non_camel_case_types)] - struct SubmitProvenBatchSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransactionBatch, - > for SubmitProvenBatchSvc { - type Response = super::super::blockchain::BlockNumber; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransactionBatch, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_batch(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenBatchSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - 
accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let 
accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncState" => { - #[allow(non_camel_case_types)] - 
struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_state(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStateSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncAccountStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncAccountStorageMapsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountStorageMapsSvc { - type Response = super::SyncAccountStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = 
async move { - let method = SyncAccountStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetLimits" => { - #[allow(non_camel_case_types)] - struct GetLimitsSvc(pub Arc); - impl tonic::server::UnaryService<()> for GetLimitsSvc { - type Response = super::RpcLimits; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> 
Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_limits(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetLimitsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "rpc.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs deleted file mode 100644 index be9d1d6469..0000000000 --- a/crates/proto/src/generated/store.rs 
+++ /dev/null @@ -1,3198 +0,0 @@ -// This file is @generated by prost-build. -/// Returns data required to prove the next block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputsRequest { - /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. - #[prost(message, repeated, tag = "1")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. - /// - /// Due to note erasure it will generally not be possible to know the exact set of nullifiers - /// a block will create, unless we pre-execute note erasure. So in practice, this set of - /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a - /// superset of the nullifiers the block may create. - /// - /// However, if it is known that a certain note will be erased, it would not be necessary to - /// provide a nullifier witness for it. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, - /// Array of block numbers referenced by all batches in the block. - #[prost(fixed32, repeated, tag = "4")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting block inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputs { - /// The latest block header. - #[prost(message, optional, tag = "1")] - pub latest_block_header: ::core::option::Option, - /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - /// the store**. 
- #[prost(message, repeated, tag = "2")] - pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< - super::note::NoteInclusionInBlockProof, - >, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the requested blocks - /// referenced by the batches in the block. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, - /// The state commitments of the requested accounts and their authentication paths. - #[prost(message, repeated, tag = "4")] - pub account_witnesses: ::prost::alloc::vec::Vec, - /// The requested nullifiers and their authentication paths. - #[prost(message, repeated, tag = "5")] - pub nullifier_witnesses: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `BlockInputs`. -pub mod block_inputs { - /// A nullifier returned as a response to the `GetBlockInputs`. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NullifierWitness { - /// The nullifier. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. - #[prost(message, optional, tag = "2")] - pub opening: ::core::option::Option, - } -} -/// Returns the inputs for a transaction batch. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputsRequest { - /// List of unauthenticated note commitments to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub note_commitments: ::prost::alloc::vec::Vec, - /// Set of block numbers referenced by transactions. - #[prost(fixed32, repeated, tag = "2")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting batch inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputs { - /// The block header that the transaction batch should reference. 
- #[prost(message, optional, tag = "1")] - pub batch_reference_block_header: ::core::option::Option< - super::blockchain::BlockHeader, - >, - /// Proof of each *found* unauthenticated note's inclusion in a block. - #[prost(message, repeated, tag = "2")] - pub note_proofs: ::prost::alloc::vec::Vec, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced - /// by the transactions in the batch. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, -} -/// Returns data required to validate a new transaction. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputsRequest { - /// ID of the account against which a transaction is executed. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of nullifiers consumed by this transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Set of unauthenticated note commitments to check for existence on-chain. - /// - /// These are notes which were not on-chain at the state the transaction was proven, - /// but could by now be present. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting transaction inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputs { - /// Account state proof. - #[prost(message, optional, tag = "1")] - pub account_state: ::core::option::Option< - transaction_inputs::AccountTransactionInputRecord, - >, - /// List of nullifiers that have been consumed. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec< - transaction_inputs::NullifierTransactionInputRecord, - >, - /// List of unauthenticated notes that were not found in the database. 
- #[prost(message, repeated, tag = "3")] - pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, - /// The node's current block height. - #[prost(fixed32, tag = "4")] - pub block_height: u32, - /// Whether the account ID prefix is unique. Only relevant for account creation requests. - /// - /// TODO: Replace this with an error. When a general error message exists. - #[prost(bool, optional, tag = "5")] - pub new_account_id_prefix_is_unique: ::core::option::Option, -} -/// Nested message and enum types in `TransactionInputs`. -pub mod transaction_inputs { - /// An account returned as a response to the `GetTransactionInputs`. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct AccountTransactionInputRecord { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The latest account commitment, zero commitment if the account doesn't exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - } - /// A nullifier returned as a response to the `GetTransactionInputs`. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierTransactionInputRecord { - /// The nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The block at which the nullifier has been consumed, zero if not consumed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Represents the result of getting network account details by ID. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeAccountDetails { - /// Account details. - #[prost(message, optional, tag = "1")] - pub details: ::core::option::Option, -} -/// Returns a paginated list of unconsumed network notes for an account. -/// -/// Notes created or consumed after the specified block are excluded from the result. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesRequest { - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - /// - /// Note that this token is only valid if used with the same parameters. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, - /// The full account ID to filter notes by. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, - /// The block number to filter the returned notes by. - /// - /// Notes that are created or consumed after this block are excluded from the result. - #[prost(fixed32, tag = "4")] - pub block_num: u32, -} -/// Represents the result of getting the unconsumed network notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnconsumedNetworkNotes { - /// An opaque pagination token. - /// - /// Use this in your next request to get the next - /// set of data. - /// - /// Will be null once there is no more data remaining. - #[prost(uint64, optional, tag = "1")] - pub next_token: ::core::option::Option, - /// The list of unconsumed network notes. - #[prost(message, repeated, tag = "2")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting the network account ids. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NetworkAccountIdList { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of network account ids. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Current blockchain data based on the requested block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CurrentBlockchainData { - /// Commitments that represent the current state according to the MMR. 
- #[prost(message, repeated, tag = "1")] - pub current_peaks: ::prost::alloc::vec::Vec, - /// Current block header. - #[prost(message, optional, tag = "2")] - pub current_block_header: ::core::option::Option, -} -/// Request for vault asset witnesses for a specific account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct VaultAssetWitnessesRequest { - /// The account ID for which to retrieve vault asset witnesses. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of asset vault keys to retrieve witnesses for. - #[prost(message, repeated, tag = "2")] - pub vault_keys: ::prost::alloc::vec::Vec, - /// The witnesses returned correspond to the account state at the specified block number. - /// - /// Optional block number. If not provided, uses the latest state. - /// - /// The specified block number should be relatively near the chain tip else an error will be - /// returned. - #[prost(fixed32, optional, tag = "3")] - pub block_num: ::core::option::Option, -} -/// Response containing vault asset witnesses. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct VaultAssetWitnessesResponse { - /// Block number at which the witnesses were generated. - /// - /// The witnesses returned corresponds to the account state at the specified block number. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// List of asset witnesses. - #[prost(message, repeated, tag = "2")] - pub asset_witnesses: ::prost::alloc::vec::Vec< - vault_asset_witnesses_response::VaultAssetWitness, - >, -} -/// Nested message and enum types in `VaultAssetWitnessesResponse`. -pub mod vault_asset_witnesses_response { - /// A vault asset witness containing the asset and its proof. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct VaultAssetWitness { - /// The SMT opening proof for the asset's inclusion in the vault. 
- #[prost(message, optional, tag = "1")] - pub proof: ::core::option::Option, - } -} -/// Request for a storage map witness for a specific account and storage slot. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapWitnessRequest { - /// The account ID for which to retrieve the storage map witness. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The raw, user-provided storage map key for which to retrieve the witness. - #[prost(message, optional, tag = "2")] - pub map_key: ::core::option::Option, - /// Optional block number. If not provided, uses the latest state. - /// - /// The witness returned corresponds to the account state at the specified block number. - /// - /// The specified block number should be relatively near the chain tip else an error will be - /// returned. - #[prost(fixed32, optional, tag = "3")] - pub block_num: ::core::option::Option, - /// The storage slot name for the map. - #[prost(string, tag = "4")] - pub slot_name: ::prost::alloc::string::String, -} -/// Response containing a storage map witness. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StorageMapWitnessResponse { - /// The storage map witness. - #[prost(message, optional, tag = "1")] - pub witness: ::core::option::Option, - /// Block number at which the witness was generated. - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Nested message and enum types in `StorageMapWitnessResponse`. -pub mod storage_map_witness_response { - /// Storage map witness data. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageWitness { - /// The raw, user-provided storage map key. - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option, - /// The SMT opening proof for the key-value pair. - #[prost(message, optional, tag = "3")] - pub proof: ::core::option::Option, - } -} -/// Generated client implementations. 
-pub mod rpc_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the RPC component - #[derive(Debug, Clone)] - pub struct RpcClient { - inner: tonic::client::Grpc, - } - impl RpcClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl RpcClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> RpcClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - RpcClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof - /// * `single` or `multiple`: Inclusion proof if the nullifier key is present - /// - /// Verify proofs against the nullifier tree root in the latest block header. 
- pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/CheckNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details the specified account. - pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetAccount"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. - pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetBlockByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. 
Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of committed notes matching the provided note IDs. - pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetNotesById"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. 
- pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNullifiers"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. 
- pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. 
- pub async fn sync_state( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncAccountVault", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. 
- pub async fn sync_account_storage_maps( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::SyncAccountStorageMapsRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncAccountStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncAccountStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncTransactions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod rpc_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. - #[async_trait] - pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. 
- async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a Sparse Merkle Tree opening proof for each requested nullifier - /// - /// Each proof demonstrates either: - /// - /// * **Inclusion**: Nullifier exists in the tree (note was consumed) - /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) - /// - /// The `leaf` field indicates the status: - /// - /// * `empty_leaf_index`: Non-inclusion proof - /// * `single` or `multiple`: Inclusion proof if the nullifier key is present - /// - /// Verify proofs against the nullifier tree root in the latest block header. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of committed notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
- /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. 
- async fn sync_state( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. - async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the RPC component - #[derive(Debug)] - pub struct RpcServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl RpcServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for RpcServer - where - T: Rpc, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.Rpc/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::super::rpc::StoreStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - 
accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = super::super::rpc::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountSvc { - type Response = super::super::rpc::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } - } - let 
accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, 
req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings 
= self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::rpc::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - 
"/store.Rpc/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncNullifiersRequest, - > for SyncNullifiersSvc { - type Response = super::super::rpc::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncNullifiersRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::super::rpc::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let 
max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncState" => { - #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncStateSvc { - type Response = super::super::rpc::SyncStateResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_state(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStateSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - 
super::super::rpc::SyncAccountVaultRequest, - > for SyncAccountVaultSvc { - type Response = super::super::rpc::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncAccountVaultRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncAccountStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncAccountStorageMapsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncAccountStorageMapsRequest, - > for SyncAccountStorageMapsSvc { - type Response = super::super::rpc::SyncAccountStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncAccountStorageMapsRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = 
self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::rpc::SyncTransactionsRequest, - > for SyncTransactionsSvc { - type Response = super::super::rpc::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::SyncTransactionsRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - 
Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for RpcServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.Rpc"; - impl tonic::server::NamedService for RpcServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod block_producer_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the BlockProducer component - #[derive(Debug, Clone)] - pub struct BlockProducerClient { - inner: tonic::client::Grpc, - } - impl BlockProducerClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BlockProducerClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BlockProducerClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - BlockProducerClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Applies changes of a new block to the DB and in-memory data structures. - pub async fn apply_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/ApplyBlock", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "ApplyBlock")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.BlockProducer", "GetBlockHeaderByNumber"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to prove the next block. 
- pub async fn get_block_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBlockInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetBlockInputs")); - self.inner.unary(req, path, codec).await - } - /// Returns the inputs for a transaction batch. - pub async fn get_batch_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetBatchInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetBatchInputs")); - self.inner.unary(req, path, codec).await - } - /// Returns data required to validate a new transaction. - pub async fn get_transaction_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.BlockProducer/GetTransactionInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.BlockProducer", "GetTransactionInputs")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. 
-pub mod block_producer_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. - #[async_trait] - pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { - /// Applies changes of a new block to the DB and in-memory data structures. - async fn apply_block( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns data required to prove the next block. - async fn get_block_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns the inputs for a transaction batch. - async fn get_batch_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns data required to validate a new transaction. 
- async fn get_transaction_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the BlockProducer component - #[derive(Debug)] - pub struct BlockProducerServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl BlockProducerServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BlockProducerServer - where - T: BlockProducer, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.BlockProducer/ApplyBlock" => { - #[allow(non_camel_case_types)] - struct ApplyBlockSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for ApplyBlockSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::apply_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ApplyBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - 
T: BlockProducer, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBlockInputs" => { - #[allow(non_camel_case_types)] - struct GetBlockInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBlockInputsSvc { - type Response = super::BlockInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = 
self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetBatchInputs" => { - #[allow(non_camel_case_types)] - struct GetBatchInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBatchInputsSvc { - type Response = super::BatchInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_batch_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBatchInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.BlockProducer/GetTransactionInputs" => { - #[allow(non_camel_case_types)] - struct GetTransactionInputsSvc(pub Arc); - impl< - T: BlockProducer, - > 
tonic::server::UnaryService - for GetTransactionInputsSvc { - type Response = super::TransactionInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_transaction_inputs( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTransactionInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for BlockProducerServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.BlockProducer"; 
- impl tonic::server::NamedService for BlockProducerServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated client implementations. -pub mod ntx_builder_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the network transaction builder component - #[derive(Debug, Clone)] - pub struct NtxBuilderClient { - inner: tonic::client::Grpc, - } - impl NtxBuilderClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl NtxBuilderClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NtxBuilderClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of unconsumed network notes. 
- pub async fn get_unconsumed_network_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetUnconsumedNetworkNotes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.NtxBuilder", "GetUnconsumedNetworkNotes"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - pub async fn get_current_blockchain_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetCurrentBlockchainData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of a network account with the specified account ID. 
- pub async fn get_network_account_details_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNetworkAccountDetailsById", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountDetailsById"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a list of all network account ids. - pub async fn get_network_account_ids( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNetworkAccountIds", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest details of the specified account. 
- pub async fn get_account( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetAccount")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns vault asset witnesses for the specified account. 
- pub async fn get_vault_asset_witnesses( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetVaultAssetWitnesses", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetVaultAssetWitnesses")); - self.inner.unary(req, path, codec).await - } - /// Returns a storage map witness for the specified account and storage map entry. - pub async fn get_storage_map_witness( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetStorageMapWitness", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.NtxBuilder", "GetStorageMapWitness")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod ntx_builder_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. - #[async_trait] - pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of unconsumed network notes. - async fn get_unconsumed_network_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - async fn get_current_blockchain_data( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of a network account with the specified account ID. - async fn get_network_account_details_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of all network account ids. - async fn get_network_account_ids( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest details of the specified account. - async fn get_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns vault asset witnesses for the specified account. - async fn get_vault_asset_witnesses( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a storage map witness for the specified account and storage map entry. 
- async fn get_storage_map_witness( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the network transaction builder component - #[derive(Debug)] - pub struct NtxBuilderServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl NtxBuilderServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for NtxBuilderServer - where - T: NtxBuilder, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/store.NtxBuilder/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::rpc::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::rpc::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::rpc::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let 
res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetUnconsumedNetworkNotes" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetUnconsumedNetworkNotesSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetCurrentBlockchainData" => { - #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - type Response = super::CurrentBlockchainData; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - 
::get_current_blockchain_data( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetCurrentBlockchainDataSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNetworkAccountDetailsById" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByIdSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByIdSvc { - type Response = super::MaybeAccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_details_by_id( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountDetailsByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - 
send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNetworkAccountIds" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountIdsSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountIdsSvc { - type Response = super::NetworkAccountIdList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_ids(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountIdsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetAccount" => { - #[allow(non_camel_case_types)] - struct GetAccountSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetAccountSvc { - type Response = super::super::rpc::AccountResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account(&inner, request).await - }; - Box::pin(fut) - } 
- } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::rpc::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; 
- Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetVaultAssetWitnesses" => { - #[allow(non_camel_case_types)] - struct GetVaultAssetWitnessesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetVaultAssetWitnessesSvc { - type Response = super::VaultAssetWitnessesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_vault_asset_witnesses( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetVaultAssetWitnessesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.NtxBuilder/GetStorageMapWitness" => { - #[allow(non_camel_case_types)] - struct GetStorageMapWitnessSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetStorageMapWitnessSvc { - type Response = super::StorageMapWitnessResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_storage_map_witness(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let 
send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetStorageMapWitnessSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for NtxBuilderServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "store.NtxBuilder"; - impl tonic::server::NamedService for NtxBuilderServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/transaction.rs b/crates/proto/src/generated/transaction.rs deleted file mode 100644 index a9dc784d68..0000000000 --- a/crates/proto/src/generated/transaction.rs +++ /dev/null @@ -1,59 +0,0 @@ -// This file is @generated by prost-build. -/// Submits proven transaction to the Miden network. 
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProvenTransaction { - /// Transaction encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. - #[prost(bytes = "vec", tag = "1")] - pub transaction: ::prost::alloc::vec::Vec, - /// Transaction inputs encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::TransactionInputs\]. - #[prost(bytes = "vec", optional, tag = "2")] - pub transaction_inputs: ::core::option::Option<::prost::alloc::vec::Vec>, -} -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProvenTransactionBatch { - /// Encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. - #[prost(bytes = "vec", tag = "1")] - pub encoded: ::prost::alloc::vec::Vec, -} -/// Represents a transaction ID. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct TransactionId { - /// The transaction ID. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Represents a transaction summary. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct TransactionSummary { - /// A unique 32-byte identifier of a transaction. - #[prost(message, optional, tag = "1")] - pub transaction_id: ::core::option::Option, - /// The block number in which the transaction was executed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - /// The ID of the account affected by the transaction. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -/// Represents a transaction header. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionHeader { - /// ID of the account against which the transaction was executed. 
- #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// State commitment of the account before the transaction was executed. - #[prost(message, optional, tag = "2")] - pub initial_state_commitment: ::core::option::Option, - /// State commitment of the account after the transaction was executed. - #[prost(message, optional, tag = "3")] - pub final_state_commitment: ::core::option::Option, - /// Nullifiers of the input notes of the transaction. - #[prost(message, repeated, tag = "4")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Output notes of the transaction. - #[prost(message, repeated, tag = "5")] - pub output_notes: ::prost::alloc::vec::Vec, -} diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs deleted file mode 100644 index 39869d9fc3..0000000000 --- a/crates/proto/src/generated/validator.rs +++ /dev/null @@ -1,457 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the validator. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ValidatorStatus { - /// The validator's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The validator's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Validator API for the Validator component. - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info of the validator. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/validator.Api/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("validator.Api", "Status")); - self.inner.unary(req, path, codec).await - } - /// Submits a transaction to the validator. - pub async fn submit_proven_transaction( - &mut self, - request: impl tonic::IntoRequest< - super::super::transaction::ProvenTransaction, - >, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/validator.Api/SubmitProvenTransaction", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("validator.Api", "SubmitProvenTransaction")); - self.inner.unary(req, path, codec).await - } - /// Validates a proposed block and returns the block header and body. - pub async fn sign_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/validator.Api/SignBlock"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("validator.Api", "SignBlock")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. 
-pub mod api_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with ApiServer. - #[async_trait] - pub trait Api: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info of the validator. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Submits a transaction to the validator. - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Validates a proposed block and returns the block header and body. - async fn sign_block( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Validator API for the Validator component. - #[derive(Debug)] - pub struct ApiServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl ApiServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for ApiServer - where - T: Api, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/validator.Api/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::ValidatorStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - 
accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/validator.Api/SubmitProvenTransaction" => { - #[allow(non_camel_case_types)] - struct SubmitProvenTransactionSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::transaction::ProvenTransaction, - > for SubmitProvenTransactionSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::transaction::ProvenTransaction, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::submit_proven_transaction(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SubmitProvenTransactionSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/validator.Api/SignBlock" => { - #[allow(non_camel_case_types)] - struct SignBlockSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::blockchain::ProposedBlock, - > for SignBlockSvc { - type Response = super::super::blockchain::BlockSignature; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: 
tonic::Request< - super::super::blockchain::ProposedBlock, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sign_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SignBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for ApiServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "validator.Api"; - impl tonic::server::NamedService for ApiServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 
f73600f276..3edb6ea546 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -21,9 +21,9 @@ std = ["miden-protocol/std", "miden-tx/std"] tx-prover = ["dep:miden-protocol", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] -getrandom = { features = ["wasm_js"], version = "0.3" } +getrandom = { features = ["wasm_js"], version = "0.4" } tonic = { features = ["codegen"], workspace = true } -tonic-web-wasm-client = { default-features = false, version = "0.8" } +tonic-web-wasm-client = { default-features = false, version = "0.9" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } @@ -41,7 +41,14 @@ tokio = { default-features = false, features = ["sync"], optional = tru tonic-prost = { workspace = true } [build-dependencies] +build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } + +[package.metadata.cargo-machete] +ignored = [ + "prost", + "tonic-prost", # used in generated OUT_DIR code +] diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index ffd9b2e711..aab6681865 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -1,37 +1,33 @@ -use std::fs; use std::io::Write; +use std::path::Path; +use fs_err as fs; use miden_node_proto_build::remote_prover_api_descriptor; -use miette::IntoDiagnostic; +use miette::{Context, IntoDiagnostic}; use tonic_prost_build::FileDescriptorSet; -/// Defines whether the build script should generate files in `/src`. -/// -/// The docs.rs build pipeline has a read-only filesystem, so we have to avoid writing to `src`, -/// otherwise the docs will fail to build there. 
Note that writing to `OUT_DIR` is fine. -const BUILD_GENERATED_FILES_IN_SRC: bool = option_env!("BUILD_PROTO").is_some(); - -const GENERATED_OUT_DIR: &str = "src/remote_prover/generated"; - /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - if !BUILD_GENERATED_FILES_IN_SRC { - return Ok(()); - } + let dst_dir = build_rs::input::out_dir().join("generated"); + + // Remove all existing files. + let _ = fs::remove_dir_all(&dst_dir); + fs::create_dir(&dst_dir) + .into_diagnostic() + .wrap_err("creating destination folder")?; let remote_prover_descriptor = remote_prover_api_descriptor(); // Build std version - let std_path = format!("{GENERATED_OUT_DIR}/std"); - build_tonic_from_descriptor(remote_prover_descriptor.clone(), std_path, true)?; + let std_path = dst_dir.join("std"); + build_tonic_from_descriptor(remote_prover_descriptor.clone(), &std_path, true)?; // Build nostd version - let nostd_path = format!("{GENERATED_OUT_DIR}/nostd"); - build_tonic_from_descriptor(remote_prover_descriptor, nostd_path.clone(), false)?; + let nostd_path = dst_dir.join("nostd"); + build_tonic_from_descriptor(remote_prover_descriptor, &nostd_path, false)?; // Convert nostd version to use core/alloc instead of std - let nostd_file_path = format!("{nostd_path}/remote_prover.rs"); + let nostd_file_path = nostd_path.join("remote_prover.rs"); convert_to_nostd(&nostd_file_path)?; Ok(()) @@ -43,11 +39,12 @@ fn main() -> miette::Result<()> { /// Builds tonic code from a `FileDescriptorSet` with specified configuration fn build_tonic_from_descriptor( descriptor: FileDescriptorSet, - out_dir: String, + dst_dir: &Path, build_transport: bool, ) -> miette::Result<()> { + fs::create_dir_all(dst_dir).into_diagnostic()?; tonic_prost_build::configure() - .out_dir(out_dir) + .out_dir(dst_dir) .build_server(false) .build_transport(build_transport) .compile_fds_with_config(descriptor, tonic_prost_build::Config::new()) @@ -55,7 
+52,7 @@ fn build_tonic_from_descriptor( } /// Replaces std references with core and alloc for nostd compatibility -fn convert_to_nostd(file_path: &str) -> miette::Result<()> { +fn convert_to_nostd(file_path: &Path) -> miette::Result<()> { let file_content = fs_err::read_to_string(file_path).into_diagnostic()?; let updated_content = file_content .replace("std::result", "core::result") diff --git a/crates/remote-prover-client/src/lib.rs b/crates/remote-prover-client/src/lib.rs index d2e0d01823..a319793d9d 100644 --- a/crates/remote-prover-client/src/lib.rs +++ b/crates/remote-prover-client/src/lib.rs @@ -2,7 +2,7 @@ // We allow unused imports here in order because this `macro_use` only makes sense for code // generated by prost under certain circumstances (when `tx-prover` is enabled and the environment // is not wasm) -#![allow(unused_imports)] +#![expect(unused_imports)] #[macro_use] extern crate alloc; @@ -15,7 +15,14 @@ extern crate std; use thiserror::Error; -pub mod remote_prover; +mod remote_prover; + +#[cfg(feature = "batch-prover")] +pub use remote_prover::batch_prover::RemoteBatchProver; +#[cfg(feature = "block-prover")] +pub use remote_prover::block_prover::RemoteBlockProver; +#[cfg(feature = "tx-prover")] +pub use remote_prover::tx_prover::RemoteTransactionProver; /// ERRORS /// =============================================================================================== diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d1fa435486..c1562e5975 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -105,7 +105,7 @@ impl RemoteBlockProver { pub async fn prove( &self, tx_batches: OrderedBatches, - block_header: BlockHeader, + block_header: &BlockHeader, block_inputs: BlockInputs, ) -> Result { use miden_protocol::utils::Serializable; diff --git 
a/crates/remote-prover-client/src/remote_prover/generated/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/mod.rs index 806afe9030..2cd709029b 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/mod.rs @@ -1,4 +1,5 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[cfg(all(feature = "std", target_arch = "wasm32"))] compile_error!("The `std` feature cannot be used when targeting `wasm32`."); diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs index 50f334d6af..16cf30145b 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/mod.rs @@ -1,2 +1,4 @@ #[rustfmt::skip] -pub mod remote_prover; +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/nostd/remote_prover.rs")); +} diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs deleted file mode 100644 index 1074dd5b8e..0000000000 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ /dev/null @@ -1,442 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. 
- /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. 
-pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + core::marker::Send + 'static, - ::Error: Into + core::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + core::marker::Send + core::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, 
interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. 
- pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> core::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - alloc::format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs index 50f334d6af..0f91ccd1d1 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/mod.rs @@ -1,2 +1,4 @@ #[rustfmt::skip] -pub mod remote_prover; +pub mod remote_prover { + include!(concat!(env!("OUT_DIR"), "/generated/std/remote_prover.rs")); +} diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs deleted file mode 100644 index 7be124daad..0000000000 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ /dev/null @@ -1,475 +0,0 @@ -// This file is @generated by prost-build. -/// Request message for proof generation containing payload and proof type metadata. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProofRequest { - /// Type of proof being requested, determines payload interpretation - #[prost(enumeration = "ProofType", tag = "1")] - pub proof_type: i32, - /// Serialized payload requiring proof generation. The encoding format is - /// type-specific: - /// - /// * TRANSACTION: TransactionInputs encoded. - /// * BATCH: ProposedBatch encoded. 
- /// * BLOCK: BlockProofRequest encoded. - #[prost(bytes = "vec", tag = "2")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Response message containing the generated proof. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Proof { - /// Serialized proof bytes. - /// - /// * TRANSACTION: Returns an encoded ProvenTransaction. - /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded BlockProof. - #[prost(bytes = "vec", tag = "1")] - pub payload: ::prost::alloc::vec::Vec, -} -/// Status of an individual worker in the proxy. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ProxyWorkerStatus { - /// The name of the worker. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The version of the worker. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// The health status of the worker. - #[prost(enumeration = "WorkerHealthStatus", tag = "3")] - pub status: i32, -} -/// Response message containing the status of the proxy. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProxyStatus { - /// The version of the proxy. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this proxy. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, - /// The list of workers managed by this proxy. - #[prost(message, repeated, tag = "3")] - pub workers: ::prost::alloc::vec::Vec, -} -/// Response message containing the status of the worker. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct WorkerStatus { - /// The version of the worker. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The proof type supported by this worker. - #[prost(enumeration = "ProofType", tag = "2")] - pub supported_proof_type: i32, -} -/// Enumeration of supported proof types. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ProofType { - /// Proof for a single transaction. - Transaction = 0, - /// Proof covering a batch of transactions. - Batch = 1, - /// Proof for entire block validity. - Block = 2, -} -impl ProofType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Transaction => "TRANSACTION", - Self::Batch => "BATCH", - Self::Block => "BLOCK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TRANSACTION" => Some(Self::Transaction), - "BATCH" => Some(Self::Batch), - "BLOCK" => Some(Self::Block), - _ => None, - } - } -} -/// Health status of a worker. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum WorkerHealthStatus { - /// The worker's health status is unknown. - /// This value is used when the proxy is not able to determine the health status of the worker. - /// It is only a temporary state and the proxy will eventually determine the health status of the worker. - Unknown = 0, - /// The worker is healthy. - /// This value is used when the worker is able to successfully process requests. - Healthy = 1, - /// The worker is unhealthy. - /// This value is used when the worker is not receiving requests or is not able to successfully process requests. - Unhealthy = 2, -} -impl WorkerHealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unknown => "UNKNOWN", - Self::Healthy => "HEALTHY", - Self::Unhealthy => "UNHEALTHY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HEALTHY" => Some(Self::Healthy), - "UNHEALTHY" => Some(Self::Unhealthy), - _ => None, - } - } -} -/// Generated client implementations. -pub mod api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ApiClient { - inner: tonic::client::Grpc, - } - impl ApiClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Generates a proof for the requested payload. - pub async fn prove( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/remote_prover.Api/Prove"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("remote_prover.Api", "Prove")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod proxy_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct ProxyStatusApiClient { - inner: tonic::client::Grpc, - } - impl ProxyStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ProxyStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ProxyStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - ProxyStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the proxy. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.ProxyStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.ProxyStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod worker_status_api_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct WorkerStatusApiClient { - inner: tonic::client::Grpc, - } - impl WorkerStatusApiClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl WorkerStatusApiClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> WorkerStatusApiClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - WorkerStatusApiClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status of the worker. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/remote_prover.WorkerStatusApi/Status", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("remote_prover.WorkerStatusApi", "Status")); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/crates/rocksdb-cxx-linkage-fix/Cargo.toml b/crates/rocksdb-cxx-linkage-fix/Cargo.toml new file mode 100644 index 0000000000..9e0eb23f7a --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +description = "Miden C++ stdlib link helper" +edition.workspace = true +homepage.workspace = true +license.workspace = true +name = "miden-node-rocksdb-cxx-linkage-fix" +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lib] +path = "src/lib.rs" + +[lints] +workspace = true + +[dependencies] diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs new file mode 100644 index 0000000000..35bc05d004 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -0,0 +1,51 @@ +//! A temporary solution to missing c++ std library linkage when using a precompile static library +//! +//! 
For more information see: + +use std::env; + +pub fn configure() { + println!("cargo:rerun-if-env-changed=ROCKSDB_COMPILE"); + println!("cargo:rerun-if-env-changed=ROCKSDB_LIB_DIR"); + println!("cargo:rerun-if-env-changed=ROCKSDB_STATIC"); + println!("cargo:rerun-if-env-changed=CXXSTDLIB"); + let target = env::var("TARGET").unwrap_or_default(); + if should_link_cpp_stdlib() { + link_cpp_stdlib(&target); + } +} + +fn should_compile() -> bool { + // in sync with + if let Ok(v) = env::var("ROCKSDB_COMPILE") { + if v.to_lowercase() == "true" || v == "1" { + return true; + } + } + false +} + +fn should_link_cpp_stdlib() -> bool { + if should_compile() { + return false; + } + // the value doesn't matter + // + env::var("ROCKSDB_STATIC").is_ok() + // `ROCKSDB_LIB_DIR` is not really discriminative, it only adds extra lookup dirs for the linker +} + +fn link_cpp_stdlib(target: &str) { + // aligned with + // + if let Ok(stdlib) = env::var("CXXSTDLIB") { + println!("cargo:rustc-link-lib=dylib={stdlib}"); + } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { + println!("cargo:rustc-link-lib=dylib=c++"); + } else if target.contains("linux") { + println!("cargo:rustc-link-lib=dylib=stdc++"); + } else if target.contains("aix") { + println!("cargo:rustc-link-lib=dylib=c++"); + println!("cargo:rustc-link-lib=dylib=c++abi"); + } +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 30ec4dcb84..537173e67d 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -38,10 +38,10 @@ url = { workspace = true } [dev-dependencies] miden-air = { features = ["testing"], workspace = true } -miden-node-store = { workspace = true } +miden-node-store = { features = ["rocksdb"], workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { workspace = true } -reqwest = { version = "0.12" 
} +reqwest = { workspace = true } rstest = { workspace = true } -tempfile = { version = "3.20" } +tempfile = { workspace = true } diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 4d3cf9387f..bfa7909105 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -24,8 +24,8 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [SubmitProvenTransaction](#submitproventransaction) - [SyncAccountVault](#SyncAccountVault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -215,25 +215,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts and -notes) the client is interested in. - -**Limits:** `account_id` (1000), `note_tag` (1000) - -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. - -Each request also returns info about new notes, accounts, etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags contain only high part of hashes. Thus, returned data contains excessive -notes, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. 
@@ -256,6 +237,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range along with pagination info so the caller can continue syncing until the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 13d26962eb..a0ec88859a 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -152,8 +152,13 @@ impl RpcService { } } +// API IMPLEMENTATION +// ================================================================================================ + #[tonic::async_trait] impl api_server::Api for RpcService { + // -- Nullifier endpoints ----------------------------------------------------------------- + async fn check_nullifiers( &self, request: Request, @@ -183,6 +188,8 @@ impl api_server::Api for RpcService { self.store.clone().sync_nullifiers(request).await } + // -- Block endpoints --------------------------------------------------------------------- + async fn get_block_header_by_number( &self, request: Request, @@ -192,27 +199,28 @@ impl api_server::Api for RpcService { self.store.clone().get_block_header_by_number(request).await } - async fn sync_state( + async fn get_block_by_number( &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request.get_ref()); + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); - check::(request.get_ref().account_ids.len())?; - check::(request.get_ref().note_tags.len())?; + debug!(target: COMPONENT, ?request); - self.store.clone().sync_state(request).await + self.store.clone().get_block_by_number(request).await 
} - async fn sync_account_storage_maps( + async fn sync_chain_mmr( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); - self.store.clone().sync_account_storage_maps(request).await + self.store.clone().sync_chain_mmr(request).await } + // -- Note endpoints ---------------------------------------------------------------------- + async fn sync_notes( &self, request: Request, @@ -245,6 +253,26 @@ impl api_server::Api for RpcService { self.store.clone().get_notes_by_id(request).await } + async fn get_note_script_by_root( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); + + self.store.clone().get_note_script_by_root(request).await + } + + // -- Account endpoints ------------------------------------------------------------------- + + async fn sync_account_storage_maps( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request.get_ref()); + + self.store.clone().sync_account_storage_maps(request).await + } + async fn sync_account_vault( &self, request: tonic::Request, @@ -255,6 +283,41 @@ impl api_server::Api for RpcService { self.store.clone().sync_account_vault(request).await } + /// Validates storage map key limits before forwarding the account request to the store. 
+ async fn get_account( + &self, + request: Request, + ) -> Result, Status> { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ + SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, + }; + + let request = request.into_inner(); + + debug!(target: COMPONENT, ?request); + + // Validate total storage map key limit before forwarding to store + if let Some(details) = &request.details { + let total_keys: usize = details + .storage_maps + .iter() + .filter_map(|m| m.slot_data.as_ref()) + .filter_map(|d| match d { + ProtoMapKeys(keys) => Some(keys.map_keys.len()), + ProtoMapAllEntries(_) => None, + }) + .sum(); + check::(total_keys)?; + } + + self.store.clone().get_account(request).await + } + + // -- Transaction submission -------------------------------------------------------------- + + /// Deserializes and rebuilds the transaction with MAST decorators stripped from output note + /// scripts, verifies the transaction proof, optionally re-executes via the validator if + /// transaction inputs are provided, then forwards the transaction to the block producer. 
async fn submit_proven_transaction( &self, request: Request, @@ -288,18 +351,7 @@ impl api_server::Api for RpcService { .account_update_details(tx.account_update().details().clone()) .add_input_notes(tx.input_notes().iter().cloned()); - let stripped_outputs = tx.output_notes().iter().map(|note| match note { - OutputNote::Full(note) => { - let mut mast = note.script().mast().clone(); - Arc::make_mut(&mut mast).strip_decorators(); - let script = NoteScript::from_parts(mast, note.script().entrypoint()); - let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); - OutputNote::Full(new_note) - }, - other => other.clone(), - }); + let stripped_outputs = strip_output_note_decorators(tx.output_notes().iter()); builder = builder.add_output_notes(stripped_outputs); let rebuilt_tx = builder.build().map_err(|e| Status::invalid_argument(e.to_string()))?; let mut request = request; @@ -333,6 +385,8 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_transaction(request).await } + /// Deserializes the batch, strips MAST decorators from full output note scripts, rebuilds + /// the batch, then forwards it to the block producer. 
async fn submit_proven_batch( &self, request: tonic::Request, @@ -347,23 +401,8 @@ impl api_server::Api for RpcService { .map_err(|err| Status::invalid_argument(err.as_report_context("invalid batch")))?; // Build a new batch with output notes' decorators removed - let stripped_outputs: Vec = batch - .output_notes() - .iter() - .map(|note| match note { - OutputNote::Full(note) => { - let mut mast = note.script().mast().clone(); - Arc::make_mut(&mut mast).strip_decorators(); - let script = NoteScript::from_parts(mast, note.script().entrypoint()); - let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = - Note::new(note.assets().clone(), note.metadata().clone(), recipient); - OutputNote::Full(new_note) - }, - other => other.clone(), - }) - .collect(); + let stripped_outputs: Vec = + strip_output_note_decorators(batch.output_notes().iter()).collect(); let rebuilt_batch = ProvenBatch::new( batch.id(), @@ -391,45 +430,17 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_batch(request).await } - async fn get_block_by_number( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - - debug!(target: COMPONENT, ?request); - - self.store.clone().get_block_by_number(request).await - } + // -- Status & utility endpoints ---------------------------------------------------------- - async fn get_account( + async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { - use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::MapKeys as ProtoMapKeys, - SlotData::AllEntries as ProtoMapAllEntries - }; - - let request = request.into_inner(); - - debug!(target: COMPONENT, ?request); + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); - // Validate total storage map key limit before forwarding to store - if let Some(details) = &request.details { - let total_keys: usize = 
details - .storage_maps - .iter() - .filter_map(|m| m.slot_data.as_ref()) - .filter_map(|d| match d { - ProtoMapKeys(keys) => Some(keys.map_keys.len()), - ProtoMapAllEntries(_) => None, - }) - .sum(); - check::(total_keys)?; - } + check::(request.get_ref().account_ids.len())?; - self.store.clone().get_account(request).await + self.store.clone().sync_transactions(request).await } async fn status( @@ -468,24 +479,6 @@ impl api_server::Api for RpcService { })) } - async fn get_note_script_by_root( - &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request); - - self.store.clone().get_note_script_by_root(request).await - } - - async fn sync_transactions( - &self, - request: Request, - ) -> Result, Status> { - debug!(target: COMPONENT, request = ?request); - - self.store.clone().sync_transactions(request).await - } - async fn get_limits( &self, request: Request<()>, @@ -496,6 +489,29 @@ impl api_server::Api for RpcService { } } +// HELPERS +// ================================================================================================ + +/// Strips decorators from full output notes' scripts. +/// +/// This removes MAST decorators from note scripts before forwarding to the block producer, +/// as decorators are not needed for transaction processing. 
+fn strip_output_note_decorators<'a>( + notes: impl Iterator + 'a, +) -> impl Iterator + 'a { + notes.map(|note| match note { + OutputNote::Full(note) => { + let mut mast = note.script().mast().clone(); + Arc::make_mut(&mut mast).strip_decorators(); + let script = NoteScript::from_parts(mast, note.script().entrypoint()); + let recipient = NoteRecipient::new(note.serial_num(), script, note.storage().clone()); + let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); + OutputNote::Full(new_note) + }, + other => other.clone(), + }) +} + // LIMIT HELPERS // ================================================================================================ @@ -505,7 +521,6 @@ fn out_of_range_error(err: E) -> Status { } /// Check, but don't repeat ourselves mapping the error -#[allow(clippy::result_large_err)] fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } @@ -519,13 +534,11 @@ fn endpoint_limits(params: &[(&str, usize)]) -> proto::rpc::EndpointLimits { /// Cached RPC query parameter limits. 
static RPC_LIMITS: LazyLock = LazyLock::new(|| { - use { - QueryParamAccountIdLimit as AccountId, - QueryParamNoteIdLimit as NoteId, - QueryParamNoteTagLimit as NoteTag, - QueryParamNullifierLimit as Nullifier, - QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal, - }; + use QueryParamAccountIdLimit as AccountId; + use QueryParamNoteIdLimit as NoteId; + use QueryParamNoteTagLimit as NoteTag; + use QueryParamNullifierLimit as Nullifier; + use QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal; proto::rpc::RpcLimits { endpoints: std::collections::HashMap::from([ @@ -538,11 +551,8 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), ), ( - "SyncState".into(), - endpoint_limits(&[ - (AccountId::PARAM_NAME, AccountId::LIMIT), - (NoteTag::PARAM_NAME, NoteTag::LIMIT), - ]), + "SyncTransactions".into(), + endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), ), ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index b35fe8b6dc..0ac510fd37 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -13,12 +13,12 @@ use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, QueryParamNoteIdLimit, - QueryParamNoteTagLimit, QueryParamNullifierLimit, }; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ + Account, AccountBuilder, AccountDelta, AccountId, @@ -28,7 +28,7 @@ use miden_protocol::account::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::testing::noop_auth_component::NoopAuthComponent; -use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::transaction::{ProvenTransaction, ProvenTransactionBuilder}; use miden_protocol::utils::Serializable; use 
miden_protocol::vm::ExecutionProof; use miden_standards::account::wallets::BasicWallet; @@ -40,11 +40,58 @@ use url::Url; use crate::Rpc; +/// Byte offset of the account delta commitment in serialized `ProvenTransaction`. +/// Layout: `AccountId` (15) + `initial_commitment` (32) + `final_commitment` (32) = 79 +const DELTA_COMMITMENT_BYTE_OFFSET: usize = 15 + 32 + 32; + +/// Creates a minimal account and its delta for testing proven transaction building. +fn build_test_account(seed: [u8; 32]) -> (Account, AccountDelta) { + let account = AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let delta: AccountDelta = account.clone().try_into().unwrap(); + (account, delta) +} + +/// Creates a minimal proven transaction for testing. +/// +/// This uses `ExecutionProof::new_dummy()` and is intended for tests that +/// need to test validation logic. +fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransaction { + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.to_commitment(), + delta.to_commitment(), + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(delta.clone())) + .build() + .unwrap() +} + #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. 
- let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -70,8 +117,8 @@ async fn rpc_server_accepts_requests_without_accept_header() { #[tokio::test] async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. - let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -87,8 +134,9 @@ async fn rpc_server_accepts_requests_with_accept_header() { async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = + start_store(store_listener).await; // Recreate the RPC client with an invalid version. let url = rpc_addr.to_string(); @@ -123,14 +171,14 @@ async fn rpc_startup_is_robust_to_network_failures() { // connect to each other on startup and that they reconnect after the store is restarted. // Start the RPC. 
- let (mut rpc_client, _, store_addr) = start_rpc().await; + let (mut rpc_client, _, store_listener) = start_rpc().await; // Test: requests against RPC api should fail immediately let response = send_request(&mut rpc_client).await; assert!(response.is_err()); // Start the store. - let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis, store_addr) = start_store(store_listener).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; @@ -153,8 +201,8 @@ async fn rpc_startup_is_robust_to_network_failures() { #[tokio::test] async fn rpc_server_has_web_support() { // Start server - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Send a status request let client = reqwest::Client::new(); @@ -196,8 +244,11 @@ async fn rpc_server_has_web_support() { #[tokio::test] async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, genesis, _store_addr) = start_store(store_listener).await; + + // Wait for the store to be ready before sending requests. + tokio::time::sleep(Duration::from_millis(100)).await; // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = @@ -209,54 +260,19 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let other_account = AccountBuilder::new([1; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Private) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - let incorrect_commitment_delta: AccountDelta = other_account.try_into().unwrap(); - let incorrect_commitment_delta_bytes = incorrect_commitment_delta.to_commitment().as_bytes(); - - let account_delta: AccountDelta = account.clone().try_into().unwrap(); + // Build a valid proven transaction + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); - // Send any request to the RPC. 
- let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + // Create an incorrect delta commitment from a different account + let (other_account, _) = build_test_account([1; 32]); + let incorrect_delta: AccountDelta = other_account.try_into().unwrap(); + let incorrect_commitment_bytes = incorrect_delta.to_commitment().as_bytes(); + // Corrupt the transaction bytes with the incorrect delta commitment let mut tx_bytes = tx.to_bytes(); - let offset = 15 + 32 + 32; - tx_bytes[offset..offset + 32].copy_from_slice(&incorrect_commitment_delta_bytes); + tx_bytes[DELTA_COMMITMENT_BYTE_OFFSET..DELTA_COMMITMENT_BYTE_OFFSET + 32] + .copy_from_slice(&incorrect_commitment_bytes); let request = proto::transaction::ProvenTransaction { transaction: tx_bytes, @@ -282,8 +298,8 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { #[tokio::test] async fn rpc_server_rejects_tx_submissions_without_genesis() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = @@ -295,39 +311,8 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. - let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), @@ -365,12 +350,9 @@ async fn send_request( /// Binds a socket on an available port, runs the RPC server on it, and /// returns a client to talk to the server, along with the socket address. 
-async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) { - let store_addr = { - let store_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); - store_listener.local_addr().expect("store should get a local address") - }; +async fn start_rpc() -> (RpcClient, std::net::SocketAddr, TcpListener) { + let store_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + let store_addr = store_listener.local_addr().expect("store should get a local address"); let block_producer_addr = { let block_producer_listener = TcpListener::bind("127.0.0.1:0").await.expect("Failed to bind block-producer"); @@ -413,19 +395,23 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) .await .expect("Failed to build client"); - (rpc_client, rpc_addr, store_addr) + (rpc_client, rpc_addr, store_listener) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { +async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word, SocketAddr) { // Start the store. 
let data_directory = tempfile::tempdir().expect("tempdir should be created"); let config = GenesisConfig::default(); let signer = SecretKey::new(); let (genesis_state, _) = config.into_state(signer).unwrap(); - Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); + Store::bootstrap(genesis_state.clone(), data_directory.path()) + .await + .expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); - let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); + let store_addr = + store_listener.local_addr().expect("store listener should get a local address"); + let rpc_listener = store_listener; let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") .await .expect("Failed to bind store ntx-builder gRPC endpoint"); @@ -439,6 +425,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -451,7 +438,8 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { ( store_runtime, data_directory, - genesis_state.into_block().unwrap().inner().header().commitment(), + genesis_state.into_block().await.unwrap().inner().header().commitment(), + store_addr, ) } @@ -479,6 +467,7 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -494,8 +483,8 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) #[tokio::test] async fn get_limits_endpoint() { // Start the RPC and store - let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _rpc_addr, store_listener) = 
start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; // Call the get_limits endpoint let response = rpc_client.get_limits(()).await.expect("get_limits should succeed"); @@ -509,27 +498,32 @@ async fn get_limits_endpoint() { limits.endpoints.get("CheckNullifiers").expect("CheckNullifiers should exist"); assert_eq!( - check_nullifiers.parameters.get("nullifier"), + check_nullifiers.parameters.get(QueryParamNullifierLimit::PARAM_NAME), Some(&(QueryParamNullifierLimit::LIMIT as u32)), - "CheckNullifiers nullifier limit should be {}", + "CheckNullifiers {} limit should be {}", + QueryParamNullifierLimit::PARAM_NAME, QueryParamNullifierLimit::LIMIT ); - // Verify SyncState endpoint has multiple parameters - let sync_state = limits.endpoints.get("SyncState").expect("SyncState should exist"); + let sync_transactions = + limits.endpoints.get("SyncTransactions").expect("SyncTransactions should exist"); assert_eq!( - sync_state.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + sync_transactions.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), Some(&(QueryParamAccountIdLimit::LIMIT as u32)), - "SyncState {} limit should be {}", + "SyncTransactions {} limit should be {}", QueryParamAccountIdLimit::PARAM_NAME, QueryParamAccountIdLimit::LIMIT ); - assert_eq!( - sync_state.parameters.get(QueryParamNoteTagLimit::PARAM_NAME), - Some(&(QueryParamNoteTagLimit::LIMIT as u32)), - "SyncState {} limit should be {}", - QueryParamNoteTagLimit::PARAM_NAME, - QueryParamNoteTagLimit::LIMIT + + // SyncAccountVault and SyncAccountStorageMaps accept a singular account_id, + // not a repeated list, so they do not have list parameter limits. 
+ assert!( + !limits.endpoints.contains_key("SyncAccountVault"), + "SyncAccountVault should not have list parameter limits" + ); + assert!( + !limits.endpoints.contains_key("SyncAccountStorageMaps"), + "SyncAccountStorageMaps should not have list parameter limits" ); // Verify GetNotesById endpoint @@ -545,3 +539,21 @@ async fn get_limits_endpoint() { // Shutdown to avoid runtime drop error. shutdown_store(store_runtime).await; } + +#[tokio::test] +async fn sync_chain_mmr_returns_delta() { + let (mut rpc_client, _rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; + + let request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), + }; + let response = rpc_client.sync_chain_mmr(request).await.expect("sync_chain_mmr should succeed"); + let response = response.into_inner(); + + let mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + assert_eq!(mmr_delta.forest, 0); + assert!(mmr_delta.data.is_empty()); + + shutdown_store(store_runtime).await; +} diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 1c62c7ab7c..d0642a8193 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,21 +15,25 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -libsqlite3-sys = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-proto = { 
workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } +deadpool-diesel = { features = ["sqlite"], version = "0.6" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } +futures = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } +miden-block-prover = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-large-smt-backend-rocksdb = { optional = true, workspace = true } +miden-node-db = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -39,15 +43,24 @@ serde = { features = ["derive"], version = "1" } thiserror = { workspace = true } tokio = { features = ["fs", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -toml = { version = "0.9" } +toml = { workspace = true } tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +url = { workspace = true } + +[build-dependencies] +build-rs = { workspace = true } +fs-err = { workspace = true } +miden-agglayer = { features = ["testing"], version = "=0.14.0-alpha.1" 
} +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } +miden-standards = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -criterion = { version = "0.5" } +criterion = "0.8" fs-err = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } @@ -55,11 +68,12 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } -termtree = { version = "0.5" } +tempfile = { workspace = true } +termtree = "1.0" [features] default = ["rocksdb"] -rocksdb = ["miden-crypto/rocksdb"] +rocksdb = ["dep:miden-large-smt-backend-rocksdb"] [[bench]] harness = false diff --git a/crates/store/README.md b/crates/store/README.md index ea44889d04..65a4f148ba 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -54,8 +54,8 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -228,23 +228,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts, -notes, nullifiers) the client is interested in. - -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. 
- -Each request also returns info about new notes, nullifiers etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags and nullifiers filters contain only high part of hashes. Thus, returned -data contains excessive notes and nullifiers, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. @@ -267,6 +250,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range and the returned `block_range` reflects the last block included, which may be the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. 
diff --git a/crates/store/benches/account_tree.rs b/crates/store/benches/account_tree.rs index 8c3f1009ec..e69da7714d 100644 --- a/crates/store/benches/account_tree.rs +++ b/crates/store/benches/account_tree.rs @@ -3,7 +3,7 @@ use std::path::Path; use std::sync::atomic::{AtomicUsize, Ordering}; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use miden_crypto::merkle::smt::{RocksDbConfig, RocksDbStorage}; +use miden_large_smt_backend_rocksdb::{RocksDbConfig, RocksDbStorage}; use miden_node_store::AccountTreeWithHistory; use miden_protocol::Word; use miden_protocol::account::AccountId; diff --git a/crates/store/build.rs b/crates/store/build.rs index d08f3fd0e6..c03975760c 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,9 +1,157 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in // `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . + +use std::path::PathBuf; +use std::sync::Arc; + +use miden_agglayer::{ + EthAddressFormat, + create_existing_agglayer_faucet, + create_existing_bridge_account, +}; +use miden_protocol::account::auth::AuthScheme; +use miden_protocol::account::{Account, AccountCode, AccountFile, AccountStorageMode, AccountType}; +use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::{Felt, Word}; +use miden_standards::AuthMethod; +use miden_standards::account::wallets::create_basic_wallet; + fn main() { - println!("cargo:rerun-if-changed=./src/db/migrations"); + build_rs::output::rerun_if_changed("src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // - println!("cargo:rerun-if-changed=Cargo.toml"); + build_rs::output::rerun_if_changed("Cargo.toml"); + + // Generate sample agglayer account files for genesis config samples. 
+ generate_agglayer_sample_accounts(); + miden_node_rocksdb_cxx_linkage_fix::configure(); +} + +/// Generates sample agglayer account files for the `02-with-account-files` genesis config sample. +/// +/// Creates: +/// - `02-with-account-files/bridge.mac` - agglayer bridge account +/// - `02-with-account-files/agglayer_faucet_eth.mac` - agglayer faucet for wrapped ETH +/// - `02-with-account-files/agglayer_faucet_usdc.mac` - agglayer faucet for wrapped USDC +fn generate_agglayer_sample_accounts() { + // Use CARGO_MANIFEST_DIR to get the absolute path to the crate root + let manifest_dir = build_rs::input::cargo_manifest_dir(); + let samples_dir: PathBuf = + manifest_dir.join("src/genesis/config/samples/02-with-account-files"); + + // Create the directory if it doesn't exist + fs_err::create_dir_all(&samples_dir).expect("Failed to create samples directory"); + + // Use deterministic seeds for reproducible builds. + // WARNING: DO NOT USE THESE IN PRODUCTION + let bridge_seed: Word = Word::new([Felt::new(1u64); 4]); + let eth_faucet_seed: Word = Word::new([Felt::new(2u64); 4]); + let usdc_faucet_seed: Word = Word::new([Felt::new(3u64); 4]); + + // Create bridge admin and GER manager as proper wallet accounts. 
+ // WARNING: DO NOT USE THESE IN PRODUCTION + let bridge_admin_key = + SecretKey::with_rng(&mut RpoRandomCoin::new(Word::new([Felt::new(4u64); 4]))); + let ger_manager_key = + SecretKey::with_rng(&mut RpoRandomCoin::new(Word::new([Felt::new(5u64); 4]))); + + let bridge_admin = create_basic_wallet( + [4u8; 32], + AuthMethod::SingleSig { + approver: (bridge_admin_key.public_key().into(), AuthScheme::Falcon512Rpo), + }, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ) + .expect("bridge admin account should be valid"); + + let ger_manager = create_basic_wallet( + [5u8; 32], + AuthMethod::SingleSig { + approver: (ger_manager_key.public_key().into(), AuthScheme::Falcon512Rpo), + }, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ) + .expect("GER manager account should be valid"); + + let bridge_admin_id = bridge_admin.id(); + let ger_manager_id = ger_manager.id(); + + // Create the bridge account first (faucets need to reference it) + // Use "existing" variant so accounts have nonce > 0 (required for genesis) + let bridge_account = + create_existing_bridge_account(bridge_seed, bridge_admin_id, ger_manager_id); + let bridge_account_id = bridge_account.id(); + + // Placeholder Ethereum addresses for sample faucets. 
+ // WARNING: DO NOT USE THESE ADDRESSES IN PRODUCTION + let eth_origin_address = EthAddressFormat::new([1u8; 20]); + let usdc_origin_address = EthAddressFormat::new([2u8; 20]); + + // Create AggLayer faucets using "existing" variant + // ETH: 8 decimals (protocol max is 12), max supply of 1 billion tokens + let eth_faucet = create_existing_agglayer_faucet( + eth_faucet_seed, + "ETH", + 8, + Felt::new(1_000_000_000), + Felt::new(0), + bridge_account_id, + ð_origin_address, + 0u32, + 10u8, + ); + + // USDC: 6 decimals, max supply of 10 billion tokens + let usdc_faucet = create_existing_agglayer_faucet( + usdc_faucet_seed, + "USDC", + 6, + Felt::new(10_000_000_000), + Felt::new(0), + bridge_account_id, + &usdc_origin_address, + 0u32, + 10u8, + ); + + // Strip source location decorators from account code to ensure deterministic output. + let bridge_account = strip_code_decorators(bridge_account); + let eth_faucet = strip_code_decorators(eth_faucet); + let usdc_faucet = strip_code_decorators(usdc_faucet); + + // Save account files (without secret keys since these use NoAuth) + let bridge_file = AccountFile::new(bridge_account, vec![]); + let eth_faucet_file = AccountFile::new(eth_faucet, vec![]); + let usdc_faucet_file = AccountFile::new(usdc_faucet, vec![]); + + // Write files + bridge_file + .write(samples_dir.join("bridge.mac")) + .expect("Failed to write bridge.mac"); + eth_faucet_file + .write(samples_dir.join("agglayer_faucet_eth.mac")) + .expect("Failed to write agglayer_faucet_eth.mac"); + usdc_faucet_file + .write(samples_dir.join("agglayer_faucet_usdc.mac")) + .expect("Failed to write agglayer_faucet_usdc.mac"); +} + +/// Strips source location decorators from an account's code MAST forest. +/// +/// This is necessary because the MAST forest embeds absolute file paths from the Cargo build +/// directory, which include a hash that differs between `cargo check` and `cargo build`. 
Stripping +/// decorators ensures the serialized `.mac` files are identical regardless of which cargo command +/// is used (CI or local builds or tests). +fn strip_code_decorators(account: Account) -> Account { + let (id, vault, storage, code, nonce, seed) = account.into_parts(); + + let mut mast = code.mast(); + Arc::make_mut(&mut mast).strip_decorators(); + let code = AccountCode::from_parts(mast, code.procedures().to_vec()); + + Account::new_unchecked(id, vault, storage, code, nonce, seed) } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index d015408adb..f9815190bd 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -2,6 +2,8 @@ use std::collections::{BTreeMap, HashMap}; +#[cfg(feature = "rocksdb")] +use miden_large_smt_backend_rocksdb::RocksDbStorage; use miden_protocol::account::{AccountId, AccountIdPrefix}; use miden_protocol::block::BlockNumber; use miden_protocol::block::account_tree::{AccountMutationSet, AccountTree, AccountWitness}; @@ -32,12 +34,12 @@ pub type InMemoryAccountTree = AccountTree>; #[cfg(feature = "rocksdb")] /// Convenience for a persistent account tree. -pub type PersistentAccountTree = AccountTree>; +pub type PersistentAccountTree = AccountTree>; // HISTORICAL ERROR TYPES // ================================================================================================ -#[allow(missing_docs)] +#[expect(missing_docs)] #[derive(thiserror::Error, Debug)] pub enum HistoricalError { #[error(transparent)] diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index f709289469..9f7b5dcbd8 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -1,10 +1,9 @@ //! 
Tests for `AccountTreeWithHistory` #[cfg(test)] -#[allow(clippy::similar_names)] -#[allow(clippy::needless_range_loop)] -#[allow(clippy::uninlined_format_args)] -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::needless_range_loop)] +#[expect(clippy::uninlined_format_args)] +#[expect(clippy::cast_sign_loss)] mod account_tree_with_history_tests { use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -152,12 +151,12 @@ mod account_tree_with_history_tests { fn test_many_accounts_sequential_updates() { // Create 50 different account IDs let account_count = 50; - let ids: Vec<_> = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| AccountIdBuilder::new().build_with_seed([i as u8; 32])) .collect(); // Create initial state with all accounts having value [i, 0, 0, 0] - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .enumerate() .map(|(i, &id)| (id, Word::from([i as u32, 0, 0, 0]))) @@ -173,7 +172,7 @@ mod account_tree_with_history_tests { .map(|i| { let idx = ((block - 1) * 5 + i) % account_count; let new_value = Word::from([idx as u32 + block as u32 * 100, 0, 0, 0]); - (ids[idx], new_value) + (account_ids[idx], new_value) }) .collect(); hist.compute_and_apply_mutations(updates).unwrap(); @@ -184,7 +183,7 @@ mod account_tree_with_history_tests { // Check genesis state for a few accounts for i in 0..4 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!( witness.state_commitment(), Word::from([i as u32, 0, 0, 0]), @@ -197,7 +196,8 @@ mod account_tree_with_history_tests { for block in 1..=num_blocks { for i in 0..5 { let idx = ((block - 1) * 5 + i) % account_count; - let witness = hist.open_at(ids[idx], BlockNumber::from(block as u32)).unwrap(); + let witness = + hist.open_at(account_ids[idx], BlockNumber::from(block as u32)).unwrap(); let expected = Word::from([idx as u32 + block as u32 * 100, 
0, 0, 0]); assert_eq!( witness.state_commitment(), @@ -302,7 +302,7 @@ mod account_tree_with_history_tests { fn test_sparse_updates_many_accounts() { // Create 200 accounts but only update a few at a time let account_count = 200; - let ids: Vec<_> = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| { let mut seed = [0u8; 32]; seed[0] = i as u8; @@ -312,7 +312,7 @@ mod account_tree_with_history_tests { .collect(); // Create initial state with first 50 accounts - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .take(50) .enumerate() @@ -323,7 +323,7 @@ mod account_tree_with_history_tests { let mut hist = AccountTreeWithHistory::new(initial_tree, BlockNumber::GENESIS); // Block 1: Add 50 more accounts - let updates1: Vec<_> = ids + let updates1: Vec<_> = account_ids .iter() .skip(50) .take(50) @@ -333,7 +333,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates1).unwrap(); // Block 2: Update every 10th account - let updates2: Vec<_> = ids + let updates2: Vec<_> = account_ids .iter() .enumerate() .filter(|(i, _)| i % 10 == 0) @@ -343,7 +343,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates2).unwrap(); // Block 3: Add remaining accounts - let updates3: Vec<_> = ids + let updates3: Vec<_> = account_ids .iter() .skip(100) .enumerate() @@ -354,13 +354,13 @@ mod account_tree_with_history_tests { // Verify states at different blocks // Check genesis - first 50 accounts exist, others don't for i in 0..50 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!(witness.state_commitment(), Word::from([i as u32, 0, 0, 0])); } // Check block 1 - first 100 accounts exist for i in 50..100 { - let witness = hist.open_at(ids[i], BlockNumber::from(1)).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::from(1)).unwrap(); 
assert_eq!(witness.state_commitment(), Word::from([i as u32, 1, 0, 0])); } @@ -368,14 +368,14 @@ mod account_tree_with_history_tests { for i in 0..10 { let idx = i * 10; if idx < 100 { - let witness = hist.open_at(ids[idx], BlockNumber::from(2)).unwrap(); + let witness = hist.open_at(account_ids[idx], BlockNumber::from(2)).unwrap(); assert_eq!(witness.state_commitment(), Word::from([idx as u32, 2, 0, 0])); } } // Check block 3 - all 200 accounts should be accessible for i in [0, 50, 100, 150, 199] { - let witness = hist.open_at(ids[i], BlockNumber::from(3)); + let witness = hist.open_at(account_ids[i], BlockNumber::from(3)); assert!(witness.is_some(), "Account {} should exist at block 3", i); } } diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index 01521e5787..10ce01409e 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -13,16 +13,16 @@ pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations" #[instrument(level = "debug", target = COMPONENT, skip_all, err)] pub fn apply_migrations( conn: &mut SqliteConnection, -) -> std::result::Result<(), crate::errors::DatabaseError> { +) -> std::result::Result<(), miden_node_db::DatabaseError> { let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); - tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + tracing::info!(target = COMPONENT, migrations = migrations.len(), "Applying migrations"); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { // Migrations applied successfully, verify schema hash verify_schema(conn)?; return Ok(()); }; - tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + tracing::warn!(target = COMPONENT, error = ?e, "Failed to apply migration"); // something went wrong, MIGRATIONS contains conn.revert_last_migration(MIGRATIONS) .expect("Duality is maintained by the developer"); diff --git 
a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 0858e71d10..1f0e151ab1 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -1,6 +1,8 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, + signature BLOB NOT NULL, + commitment BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) @@ -59,7 +61,7 @@ CREATE TABLE notes ( consumed_at INTEGER, -- Block number when the note was consumed nullifier BLOB, -- Only known for public notes, null for private notes assets BLOB, - inputs BLOB, + storage BLOB, script_root BLOB, serial_num BLOB, @@ -155,3 +157,9 @@ CREATE TABLE transactions ( CREATE INDEX idx_transactions_account_id ON transactions(account_id); -- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); + +CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; +CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; + +CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql deleted file mode 100644 index 1a15b55c4d..0000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP INDEX IF EXISTS idx_account_storage_map_latest_by_account_slot_key; -DROP INDEX IF EXISTS idx_account_vault_assets_latest_by_account_key; diff --git 
a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql deleted file mode 100644 index 83233e157e..0000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql +++ /dev/null @@ -1,2 +0,0 @@ -CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; -CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab4..9fe6aec756 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,16 +1,16 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::ops::RangeInclusive; +use std::ops::{Deref, DerefMut, RangeInclusive}; use std::path::PathBuf; use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; -use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; +use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; use miden_protocol::asset::{Asset, AssetVaultKey}; -use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, SignedBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ NoteDetails, @@ -23,19 +23,21 @@ use miden_protocol::note::{ use miden_protocol::transaction::TransactionId; use miden_protocol::utils::{Deserializable, Serializable}; use tokio::sync::oneshot; -use tracing::{Instrument, info, instrument}; +use tracing::{info, instrument}; use crate::COMPONENT; -use 
crate::db::manager::{ConnectionManager, configure_connection_on_creation}; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; -use crate::db::models::queries::StorageMapValuesPage; +pub use crate::db::models::queries::{ + AccountCommitmentsPage, + NullifiersPage, + PublicAccountIdsPage, +}; +use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage}; use crate::db::models::{Page, queries}; -use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; +use crate::errors::{DatabaseError, NoteSyncError}; use crate::genesis::GenesisBlock; -pub(crate) mod manager; - mod migrations; mod schema_hash; @@ -45,12 +47,47 @@ mod tests; pub(crate) mod models; /// [diesel](https://diesel.rs) generated schema +/// +/// ```sh +/// cargo binstall diesel_cli +/// sqlite3 -init ./src/db/migrations/001-init.sql ephemeral_setup.db "" +/// diesel setup --database-url=./ephemeral_setup.db +/// diesel print-schema > src/db/schema.rs +/// ``` +/// +/// which assumes an _existing_ database. +/// +/// Unfortunately, there is no systematic way of modifying the schema other +/// than patching (in the diff sense) which is brittle at best. +/// So the above must be followed by a manual editing step, for now it's +/// limited to: +/// +/// * `i64`/`u64` being represented as `BigInt` +/// +/// The list might be extended. pub(crate) mod schema; pub type Result = std::result::Result; +/// The Store's database. +/// +/// Extends the underlying [`miden_node_db::Db`] type with functionality specific to the Store. pub struct Db { - pool: deadpool_diesel::Pool>, + db: miden_node_db::Db, +} + +impl Deref for Db { + type Target = miden_node_db::Db; + + fn deref(&self) -> &Self::Target { + &self.db + } +} + +impl DerefMut for Db { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.db + } } /// Describes the value of an asset for an account ID at `block_num` specifically. 
@@ -88,13 +125,6 @@ impl PartialEq<(Nullifier, BlockNumber)> for NullifierInfo { } } -#[derive(Debug, PartialEq)] -pub struct TransactionSummary { - pub account_id: AccountId, - pub block_num: BlockNumber, - pub transaction_id: TransactionId, -} - #[derive(Debug, PartialEq)] pub struct TransactionRecord { pub block_num: BlockNumber, @@ -172,14 +202,6 @@ impl From for proto::note::NoteSyncRecord { } } -#[derive(Debug, PartialEq)] -pub struct StateSyncUpdate { - pub notes: Vec, - pub block_header: BlockHeader, - pub account_updates: Vec, - pub transactions: Vec, -} - #[derive(Debug, PartialEq)] pub struct NoteSyncUpdate { pub notes: Vec, @@ -238,7 +260,7 @@ impl Db { ) .context("failed to open a database connection")?; - configure_connection_on_creation(&mut conn)?; + miden_node_db::configure_connection_on_creation(&mut conn)?; // Run migrations. apply_migrations(&mut conn).context("failed to apply database migrations")?; @@ -249,6 +271,7 @@ impl Db { models::queries::apply_block( conn, genesis.header(), + genesis.signature(), &[], &[], genesis.body().updated_accounts(), @@ -259,83 +282,42 @@ impl Db { Ok(()) } - /// Create and commit a transaction with the queries added in the provided closure - pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send - + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result - + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .in_current_span() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) - .in_current_span() - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? 
- } - - /// Run the query _without_ a transaction - pub(crate) async fn query(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(move |conn| { - let r = query(conn)?; - Ok(r) - }) - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - /// Open a connection to the DB and apply any pending migrations. #[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { - let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); - let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; - + pub async fn load(database_filepath: PathBuf) -> Result { + let db = miden_node_db::Db::new(&database_filepath)?; info!( target: COMPONENT, sqlite= %database_filepath.display(), "Connected to the database" ); - let me = Db { pool }; - me.query("migrations", apply_migrations).await?; - Ok(me) + db.query("migrations", apply_migrations).await?; + Ok(Self { db }) } - /// Loads all the nullifiers from the DB. + /// Returns a page of nullifiers for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub(crate) async fn select_all_nullifiers(&self) -> Result> { - self.transact("all nullifiers", move |conn| { - let nullifiers = queries::select_all_nullifiers(conn)?; - Ok(nullifiers) + pub async fn select_nullifiers_paged( + &self, + page_size: std::num::NonZeroUsize, + after_nullifier: Option, + ) -> Result { + self.transact("read nullifiers paged", move |conn| { + queries::select_nullifiers_paged(conn, page_size, after_nullifier) }) .await } /// Loads the nullifiers that match the prefixes from the DB. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + #[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(prefix_len, prefixes = nullifier_prefixes.len()), + ret(level = "debug"), + err + )] pub async fn select_nullifiers_by_prefix( &self, prefix_len: u32, @@ -395,20 +377,38 @@ impl Db { .await } - /// TODO marked for removal, replace with paged version + /// Loads all the block headers from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_block_header_commitments(&self) -> Result> { + self.transact("all block headers", |conn| { + let raw = queries::select_all_block_header_commitments(conn)?; + Ok(raw) + }) + .await + } + + /// Returns a page of account commitments for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_account_commitments(&self) -> Result> { - self.transact("read all account commitments", move |conn| { - queries::select_all_account_commitments(conn) + pub async fn select_account_commitments_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read account commitments paged", move |conn| { + queries::select_account_commitments_paged(conn, page_size, after_account_id) }) .await } - /// Returns all account IDs that have public state. + /// Returns a page of public account IDs for forest rebuilding. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_public_account_ids(&self) -> Result> { - self.transact("read all public account IDs", move |conn| { - queries::select_all_public_account_ids(conn) + pub async fn select_public_account_ids_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read public account IDs paged", move |conn| { + queries::select_public_account_ids_paged(conn, page_size, after_account_id) }) .await } @@ -497,19 +497,6 @@ impl Db { .await } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn get_state_sync( - &self, - block_number: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result { - self.transact::("state sync", move |conn| { - queries::get_state_sync(conn, block_number, account_ids, note_tags) - }) - .await - } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_note_sync( &self, @@ -566,17 +553,18 @@ impl Db { &self, allow_acquire: oneshot::Sender<()>, acquire_done: oneshot::Receiver<()>, - block: ProvenBlock, + signed_block: SignedBlock, notes: Vec<(NoteRecord, Option)>, ) -> Result<()> { self.transact("apply block", move |conn| -> Result<()> { models::queries::apply_block( conn, - block.header(), + signed_block.header(), + signed_block.signature(), ¬es, - block.body().created_nullifiers(), - block.body().updated_accounts(), - block.body().transactions(), + signed_block.body().created_nullifiers(), + signed_block.body().updated_accounts(), + signed_block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist @@ -585,6 +573,8 @@ impl Db { tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock"); } + models::queries::prune_history(conn, signed_block.header().block_num())?; + acquire_done.blocking_recv()?; Ok(()) 
diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 2e6313bf61..2176ea0d46 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -14,30 +14,32 @@ //! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered //! by migrations! -#![allow( +#![expect( clippy::inline_always, reason = "Just unification helpers of 1-2 lines of casting types" )] -#![allow( +#![expect( dead_code, reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" )] -#![allow( +#![expect( clippy::cast_sign_loss, reason = "This is the one file where we map the signed database types to the working types" )] -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize casting will cause issues on relevant platforms" )] +use miden_crypto::Word; +use miden_crypto::utils::Deserializable; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; -use miden_protocol::block::BlockNumber; +use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteTag; -use crate::db::models::queries::NetworkAccountType; +use crate::db::models::queries::{BlockHeaderCommitment, NetworkAccountType}; #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] @@ -50,7 +52,7 @@ pub struct DatabaseTypeConversionError { /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. 
-pub(crate) trait SqlTypeConvert: Sized { +pub trait SqlTypeConvert: Sized { type Raw: Sized; fn to_raw_sql(self) -> Self::Raw; @@ -67,6 +69,32 @@ pub(crate) trait SqlTypeConvert: Sized { } } +impl SqlTypeConvert for BlockHeaderCommitment { + type Raw = Vec; + fn from_raw_sql( + raw: Self::Raw, + ) -> Result { + let inner = + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err)?; + Ok(BlockHeaderCommitment(inner)) + } + fn to_raw_sql(self) -> Self::Raw { + self.0.as_bytes().to_vec() + } +} + +impl SqlTypeConvert for BlockHeader { + type Raw = Vec; + + fn from_raw_sql(raw: Self::Raw) -> Result { + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + miden_crypto::utils::Serializable::to_bytes(&self) + } +} + impl SqlTypeConvert for NetworkAccountType { type Raw = i32; @@ -107,7 +135,7 @@ impl SqlTypeConvert for NoteTag { #[inline(always)] fn from_raw_sql(raw: Self::Raw) -> Result { - #[allow(clippy::cast_sign_loss)] + #[expect(clippy::cast_sign_loss)] Ok(NoteTag::new(raw as u32)) } @@ -189,7 +217,7 @@ pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { } #[inline(always)] -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { raw as u8 } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 6f8e3834f6..c74f83401f 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -17,22 +18,17 @@ use diesel::{ SqliteConnection, }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{ - MAX_RESPONSE_PAYLOAD_BYTES, - QueryParamAccountIdLimit, - QueryParamLimiter, -}; -use miden_protocol::Word; +use 
miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountCode, - AccountDelta, AccountId, AccountStorage, AccountStorageHeader, NonFungibleDeltaAction, StorageMap, + StorageMapKey, StorageSlot, StorageSlotContent, StorageSlotName, @@ -41,10 +37,12 @@ use miden_protocol::account::{ use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, Word}; use crate::COMPONENT; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; -use crate::db::models::{serialize_vec, vec_raw_try_into}; +#[cfg(test)] +use crate::db::models::vec_raw_try_into; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; @@ -54,10 +52,21 @@ pub(crate) use at_block::{ select_account_vault_at_block, }; +mod delta; +use delta::{ + AccountStateForInsert, + PartialAccountState, + apply_storage_delta, + select_minimal_account_state_headers, + select_vault_balances_by_faucet_ids, +}; + #[cfg(test)] mod tests; type StorageMapValueRow = (i64, String, Vec, Vec); +type StorageHeaderWithEntries = + (AccountStorageHeader, BTreeMap>); // NETWORK ACCOUNT TYPE // ================================================================================================ @@ -164,7 +173,7 @@ pub(crate) fn select_account( /// `State` which contains an `SmtForest` to serve the latest and most recent /// historical data. // TODO: remove eventually once refactoring is complete -fn select_full_account( +pub(crate) fn select_full_account( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result { @@ -254,11 +263,19 @@ pub(crate) fn select_network_account_by_id( } } -/// Select all account commitments from the DB using the given [`SqliteConnection`]. 
-/// -/// # Returns +/// Page of account commitments returned by [`select_account_commitments_paged`]. +#[derive(Debug)] +pub struct AccountCommitmentsPage { + /// The account commitments in this page. + pub commitments: Vec<(AccountId, Word)>, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects account commitments with pagination. /// -/// The vector with the account id and corresponding commitment, or an error. +/// Returns up to `page_size` account commitments, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. /// /// # Raw SQL /// @@ -270,31 +287,71 @@ pub(crate) fn select_network_account_by_id( /// accounts /// WHERE /// is_latest = 1 +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_account_commitments( +pub(crate) fn select_account_commitments_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - let raw = SelectDsl::select( + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; + + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::<(Vec, Vec)>(conn)?; + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); - Result::, DatabaseError>::from_iter(raw.into_iter().map( + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::<(Vec, Vec)>(conn)?; + + let mut commitments = 
Result::, DatabaseError>::from_iter(raw.into_iter().map( |(ref account, ref commitment)| { Ok((AccountId::read_from_bytes(account)?, Word::read_from_bytes(commitment)?)) }, - )) + ))?; + + // If we got more than page_size, there are more results + let next_cursor = if commitments.len() > page_size.get() { + commitments.pop(); // Remove the extra element + commitments.last().map(|(id, _)| *id) + } else { + None + }; + + Ok(AccountCommitmentsPage { commitments, next_cursor }) } -/// Select all account IDs that have public state. +/// Page of public account IDs returned by [`select_public_account_ids_paged`]. +#[derive(Debug)] +pub struct PublicAccountIdsPage { + /// The public account IDs in this page. + pub account_ids: Vec, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects public account IDs with pagination. /// -/// This filters accounts in-memory after loading only the account IDs (not commitments), -/// which is more efficient than loading full commitments when only IDs are needed. +/// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. +/// +/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`. +/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full +/// state (including `code_commitment`), while private accounts only store the `account_commitment`. 
/// /// # Raw SQL /// @@ -305,31 +362,48 @@ pub(crate) fn select_all_account_commitments( /// accounts /// WHERE /// is_latest = 1 +/// AND code_commitment IS NOT NULL +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_public_account_ids( +pub(crate) fn select_public_account_ids_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - // We could technically use a `LIKE` constraint for both postgres and sqlite backends, - // but diesel doesn't expose that. - let raw: Vec> = - SelectDsl::select(schema::accounts::table, schema::accounts::account_id) - .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::>(conn)?; + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; - Result::from_iter( - raw.into_iter() - .map(|bytes| { - AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) - }) - .filter_map(|result| match result { - Ok(id) if id.has_public_state() => Some(Ok(id)), - Ok(_) => None, - Err(e) => Some(Err(e)), - }), - ) + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::code_commitment.is_not_null()) + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::>(conn)?; + + let mut account_ids: Vec = Result::from_iter(raw.into_iter().map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }))?; + + // If we got more than page_size, there are more results + let next_cursor = if 
account_ids.len() > page_size.get() { + account_ids.pop(); // Remove the extra element + account_ids.last().copied() + } else { + None + }; + + Ok(PublicAccountIdsPage { account_ids, next_cursor }) } /// Select account vault assets within a block range (inclusive). @@ -418,49 +492,6 @@ pub(crate) fn select_account_vault_assets( Ok((last_block_included, values)) } -/// Select [`AccountSummary`] from the DB using the given [`SqliteConnection`], given that the -/// account update was in the given block range (inclusive). -/// -/// # Returns -/// -/// The vector of [`AccountSummary`] with the matching accounts. -/// -/// # Raw SQL -/// -/// ```sql -/// SELECT -/// account_id, -/// account_commitment, -/// block_num -/// FROM -/// accounts -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// block_num ASC -/// ``` -pub fn select_accounts_by_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw: Vec = - SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) - .filter(schema::accounts::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::accounts::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::accounts::account_id.eq_any(desired_account_ids)) - .order(schema::accounts::block_num.asc()) - .load::(conn)?; - // SAFETY `From` implies `TryFrom `AccountSummary` - Ok(vec_raw_try_into(raw).unwrap()) -} - /// Select all accounts from the DB using the given [`SqliteConnection`]. 
/// /// # Returns @@ -588,7 +619,7 @@ pub(crate) fn select_all_network_account_ids( pub struct StorageMapValue { pub block_num: BlockNumber, pub slot_name: StorageSlotName, - pub key: Word, + pub key: StorageMapKey, pub value: Word, } @@ -606,7 +637,7 @@ impl StorageMapValue { Ok(Self { block_num: BlockNumber::from_raw_sql(block_num)?, slot_name: StorageSlotName::from_raw_sql(slot_name)?, - key: Word::read_from_bytes(&key)?, + key: StorageMapKey::read_from_bytes(&key)?, value: Word::read_from_bytes(&value)?, }) } @@ -718,12 +749,41 @@ pub(crate) fn select_account_storage_map_values( /// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` /// and reconstructing full storage from the header plus map values from /// `account_storage_map_values`. +/// +/// Attention: For large accounts it is prohibitively expensive! pub(crate) fn select_latest_account_storage( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result { - use schema::account_storage_map_values as t; + let (storage_header, map_entries_by_slot) = + select_latest_account_storage_components(conn, account_id)?; + // Reconstruct StorageSlots from header slots + map entries + let slots = + Result::, DatabaseError>::from_iter(storage_header.slots().map(|slot_header| { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = + map_entries_by_slot.get(slot_header.name()).cloned().unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries.into_iter())?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + Ok(slot) + }))?; + + Ok(AccountStorage::new(slots)?) 
+} +/// Fetch account storage header and all storage maps +pub(crate) fn select_latest_account_storage_components( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { let account_id_bytes = account_id.to_bytes(); // Query storage header blob for this account where is_latest = true @@ -735,51 +795,96 @@ pub(crate) fn select_latest_account_storage( .optional()? .flatten(); - let Some(blob) = storage_blob else { - // No storage means empty storage - return Ok(AccountStorage::new(Vec::new())?); + let header = match storage_blob { + Some(blob) => AccountStorageHeader::read_from_bytes(&blob)?, + None => AccountStorageHeader::new(Vec::new())?, }; - // Deserialize the AccountStorageHeader from the blob - let header = AccountStorageHeader::read_from_bytes(&blob)?; + let entries = select_latest_storage_map_entries_all(conn, &account_id)?; + Ok((header, entries)) +} + +// TODO this is expensive and should only be called from tests +fn select_latest_storage_map_entries_all( + conn: &mut SqliteConnection, + account_id: &AccountId, +) -> Result>, DatabaseError> { + use schema::account_storage_map_values as t; + + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id.to_bytes())) + .filter(t::is_latest.eq(true)) + .load(conn)?; + + group_storage_map_entries(map_values) +} + +fn select_latest_storage_map_entries_for_slots( + conn: &mut SqliteConnection, + account_id: &AccountId, + slot_names: &[StorageSlotName], +) -> Result>, DatabaseError> { + use schema::account_storage_map_values as t; + + if slot_names.is_empty() { + return Ok(BTreeMap::new()); + } - // Query all latest map values for this account + if let [slot_name] = slot_names { + let entries = select_latest_storage_map_entries_for_slot(conn, account_id, slot_name)?; + if entries.is_empty() { + return Ok(BTreeMap::new()); + } + + let mut map_entries = BTreeMap::new(); + map_entries.insert(slot_name.clone(), 
entries); + return Ok(map_entries); + } + + let slot_names = Vec::from_iter(slot_names.iter().cloned().map(StorageSlotName::to_raw_sql)); let map_values: Vec<(String, Vec, Vec)> = SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) - .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::account_id.eq(&account_id.to_bytes())) .filter(t::is_latest.eq(true)) + .filter(t::slot_name.eq_any(slot_names)) .load(conn)?; - // Group map values by slot name - let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + group_storage_map_entries(map_values) +} + +fn select_latest_storage_map_entries_for_slot( + conn: &mut SqliteConnection, + account_id: &AccountId, + slot_name: &StorageSlotName, +) -> Result, DatabaseError> { + use schema::account_storage_map_values as t; + + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id.to_bytes())) + .filter(t::is_latest.eq(true)) + .filter(t::slot_name.eq(slot_name.clone().to_raw_sql())) + .load(conn)?; + + Ok(group_storage_map_entries(map_values)?.remove(slot_name).unwrap_or_default()) +} + +fn group_storage_map_entries( + map_values: Vec<(String, Vec, Vec)>, +) -> Result>, DatabaseError> { + let mut map_entries_by_slot: BTreeMap> = + BTreeMap::new(); for (slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; - let key = Word::read_from_bytes(&key_bytes)?; + let key = StorageMapKey::read_from_bytes(&key_bytes)?; let value = Word::read_from_bytes(&value_bytes)?; - map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + map_entries_by_slot.entry(slot_name).or_default().insert(key, value); } - // Reconstruct StorageSlots from header slots + map entries - let mut slots = Vec::new(); - for slot_header in header.slots() { - let slot = match slot_header.slot_type() { - 
StorageSlotType::Value => { - // For value slots, the header value IS the slot value - StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) - }, - StorageSlotType::Map => { - // For map slots, reconstruct from map entries - let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); - let storage_map = StorageMap::with_entries(entries)?; - StorageSlot::with_map(slot_header.name().clone(), storage_map) - }, - }; - slots.push(slot); - } - - Ok(AccountStorage::new(slots)?) + Ok(map_entries_by_slot) } // ACCOUNT MUTATION @@ -851,11 +956,13 @@ pub(crate) fn insert_account_vault_asset( // First, update any existing rows with the same (account_id, vault_key) to set // is_latest=false let vault_key: Word = vault_key.into(); + let vault_key_bytes = vault_key.to_bytes(); + let account_id_bytes = account_id.to_bytes(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id - .eq(&account_id.to_bytes()) - .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) + .eq(account_id_bytes) + .and(schema::account_vault_assets::vault_key.eq(vault_key_bytes)) .and(schema::account_vault_assets::is_latest.eq(true)), ) .set(schema::account_vault_assets::is_latest.eq(false)) @@ -883,7 +990,7 @@ pub(crate) fn insert_account_storage_map_value( account_id: AccountId, block_num: BlockNumber, slot_name: StorageSlotName, - key: Word, + key: StorageMapKey, value: Word, ) -> Result { let account_id = account_id.to_bytes(); @@ -918,8 +1025,171 @@ pub(crate) fn insert_account_storage_map_value( Ok(update_count + insert_count) } +type PendingStorageInserts = Vec<(AccountId, StorageSlotName, StorageMapKey, Word)>; +type PendingAssetInserts = Vec<(AccountId, AssetVaultKey, Option)>; + +fn prepare_full_account_update( + update: &BlockAccountUpdate, + account: Account, +) -> Result<(AccountStateForInsert, PendingStorageInserts, PendingAssetInserts), DatabaseError> { + let account_id 
= account.id(); + + // sanity check the commitment of account matches the final state commitment + if account.to_commitment() != update.final_state_commitment() { + return Err(DatabaseError::AccountCommitmentsMismatch { + calculated: account.to_commitment(), + expected: update.final_state_commitment(), + }); + } + + // collect storage-map inserts to apply after account upsert + let mut storage = Vec::new(); + for slot in account.storage().slots() { + if let StorageSlotContent::Map(storage_map) = slot.content() { + for (key, value) in storage_map.entries() { + storage.push((account_id, slot.name().clone(), *key, *value)); + } + } + } + + // collect vault-asset inserts to apply after account upsert + let mut assets = Vec::new(); + for asset in account.vault().assets() { + // Only insert assets with non-zero values for fungible assets + let should_insert = match asset { + Asset::Fungible(fungible) => fungible.amount() > 0, + Asset::NonFungible(_) => true, + }; + if should_insert { + assets.push((account_id, asset.vault_key(), Some(asset))); + } + } + + Ok((AccountStateForInsert::FullAccount(account), storage, assets)) +} + +/// Prepare partial delta data for account upserts and follow-up storage and vault inserts. +fn prepare_partial_account_update( + conn: &mut SqliteConnection, + update: &BlockAccountUpdate, + account_id: AccountId, + delta: &miden_protocol::account::delta::AccountDelta, + block_num: BlockNumber, +) -> Result<(AccountStateForInsert, PendingStorageInserts, PendingAssetInserts), DatabaseError> { + // Build the minimal account state needed for partial delta application. + // Only load the storage map entries and vault balances that will receive updates. + // The next line fetches the header, which will always change unless the delta is empty. + let state_headers = select_minimal_account_state_headers(conn, account_id)?; + + // --- Process asset updates. --------------------------------- + // Only query balances for faucet_ids that are being updated. 
+ let faucet_ids = + Vec::from_iter(delta.vault().fungible().iter().map(|(faucet_id, _)| *faucet_id)); + let prev_balances = select_vault_balances_by_faucet_ids(conn, account_id, &faucet_ids)?; + + // Encode `Some` as update and `None` as removal. + let mut assets = Vec::new(); + + // Update fungible assets. + for (faucet_id, amount_delta) in delta.vault().fungible().iter() { + let prev_amount = prev_balances.get(faucet_id).copied().unwrap_or(0); + let prev_asset = FungibleAsset::new(*faucet_id, prev_amount)?; + let amount_abs = amount_delta.unsigned_abs(); + let delta = FungibleAsset::new(*faucet_id, amount_abs)?; + let new_balance = if *amount_delta < 0 { + prev_asset.sub(delta)? + } else { + prev_asset.add(delta)? + }; + let update_or_remove = if new_balance.amount() == 0 { + None + } else { + Some(Asset::from(new_balance)) + }; + assets.push((account_id, new_balance.vault_key(), update_or_remove)); + } + + // Update non-fungible assets. + for (asset, delta_action) in delta.vault().non_fungible().iter() { + let asset_update = match delta_action { + NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), + NonFungibleDeltaAction::Remove => None, + }; + assets.push((account_id, asset.vault_key(), asset_update)); + } + + // --- Collect storage map updates. --------------------------- + + let mut storage = Vec::new(); + for (slot_name, map_delta) in delta.storage().maps() { + for (key, value) in map_delta.entries() { + storage.push((account_id, slot_name.clone(), (*key).into_inner(), *value)); + } + } + + // First collect entries that have associated changes. + let slot_names = Vec::from_iter(delta.storage().maps().filter_map(|(slot_name, map_delta)| { + if map_delta.is_empty() { + None + } else { + Some(slot_name.clone()) + } + })); + + let map_entries = select_latest_storage_map_entries_for_slots(conn, &account_id, &slot_names)?; + + // Apply the delta storage to the given storage header. 
+ let new_storage_header = + apply_storage_delta(&state_headers.storage_header, delta.storage(), &map_entries)?; + + // --- Update the vault root by constructing the asset vault from DB. + let new_vault_root = { + let (_last_block, assets) = + select_account_vault_assets(conn, account_id, BlockNumber::GENESIS..=block_num)?; + let assets: Vec = assets.into_iter().filter_map(|entry| entry.asset).collect(); + let mut vault = AssetVault::new(&assets)?; + vault.apply_delta(delta.vault())?; + vault.root() + }; + + // --- Compute updated account state for the accounts row. --- + // Apply nonce delta. + let new_nonce_value = state_headers + .nonce + .as_int() + .checked_add(delta.nonce_delta().as_int()) + .ok_or_else(|| { + DatabaseError::DataCorrupted(format!("Nonce overflow for account {account_id}")) + })?; + let new_nonce = Felt::new(new_nonce_value); + + // Create minimal account state data for the row insert. + let account_state = PartialAccountState { + nonce: new_nonce, + code_commitment: state_headers.code_commitment, + storage_header: new_storage_header, + vault_root: new_vault_root, + }; + + let account_header = miden_protocol::account::AccountHeader::new( + account_id, + account_state.nonce, + account_state.vault_root, + account_state.storage_header.to_commitment(), + account_state.code_commitment, + ); + + if account_header.to_commitment() != update.final_state_commitment() { + return Err(DatabaseError::AccountCommitmentsMismatch { + calculated: account_header.to_commitment(), + expected: update.final_state_commitment(), + }); + } + + Ok((AccountStateForInsert::PartialState(account_state), storage, assets)) +} + /// Attention: Assumes the account details are NOT null! The schema explicitly allows this though! -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -943,7 +1213,7 @@ pub(crate) fn upsert_accounts( }; // Preserve the original creation block when updating existing accounts. 
- let created_at_block = QueryDsl::select( + let created_at_block_raw = QueryDsl::select( schema::accounts::table.filter( schema::accounts::account_id .eq(&account_id_bytes) @@ -955,95 +1225,34 @@ pub(crate) fn upsert_accounts( .optional() .map_err(DatabaseError::Diesel)? .unwrap_or(block_num_raw); + let created_at_block = BlockNumber::from_raw_sql(created_at_block_raw)?; - // NOTE: we collect storage / asset inserts to apply them only after the account row is - // written. The storage and vault tables have FKs pointing to `accounts (account_id, + // NOTE: we collect storage / asset inserts to apply them only after the account row is + // written. The storage and vault tables have FKs pointing to accounts `(account_id, // block_num)`, so inserting them earlier would violate those constraints when inserting a // brand-new account. - let (full_account, pending_storage_inserts, pending_asset_inserts) = match update.details() + let (account_state, pending_storage_inserts, pending_asset_inserts) = match update.details() { - AccountUpdateDetails::Private => (None, vec![], vec![]), + AccountUpdateDetails::Private => (AccountStateForInsert::Private, vec![], vec![]), + // New account is always a full account, but also comes as an update AccountUpdateDetails::Delta(delta) if delta.is_full_state() => { - let account = Account::try_from(delta)?; + let account = Account::try_from(delta) + .expect("Delta to full account always works for full state deltas"); debug_assert_eq!(account_id, account.id()); - if account.commitment() != update.final_state_commitment() { - return Err(DatabaseError::AccountCommitmentsMismatch { - calculated: account.commitment(), - expected: update.final_state_commitment(), - }); - } - - // collect storage-map inserts to apply after account upsert - let mut storage = Vec::new(); - for slot in account.storage().slots() { - if let StorageSlotContent::Map(storage_map) = slot.content() { - for (key, value) in storage_map.entries() { - 
storage.push((account_id, slot.name().clone(), *key, *value)); - } - } - } - - // collect vault-asset inserts to apply after account upsert - let mut assets = Vec::new(); - for asset in account.vault().assets() { - // Only insert assets with non-zero values for fungible assets - let should_insert = match asset { - Asset::Fungible(fungible) => fungible.amount() > 0, - Asset::NonFungible(_) => true, - }; - if should_insert { - assets.push((account_id, asset.vault_key(), Some(asset))); - } - } - - (Some(account), storage, assets) + prepare_full_account_update(update, account)? }, + // Update of an existing account AccountUpdateDetails::Delta(delta) => { - // Reconstruct the full account from database tables - let account = select_full_account(conn, account_id)?; - - // --- collect storage map updates ---------------------------- - - let mut storage = Vec::new(); - for (slot_name, map_delta) in delta.storage().maps() { - for (key, value) in map_delta.entries() { - storage.push((account_id, slot_name.clone(), (*key).into(), *value)); - } - } - - // apply delta to the account; we need to do this before we process asset updates - // because we currently need to get the current value of fungible assets from the - // account - let account_after = apply_delta(account, delta, &update.final_state_commitment())?; - - // --- process asset updates ---------------------------------- - - let mut assets = Vec::new(); - - for (faucet_id, _) in delta.vault().fungible().iter() { - let current_amount = account_after.vault().get_balance(*faucet_id).unwrap(); - let asset: Asset = FungibleAsset::new(*faucet_id, current_amount)?.into(); - let update_or_remove = if current_amount == 0 { None } else { Some(asset) }; - - assets.push((account_id, asset.vault_key(), update_or_remove)); - } - - for (asset, delta_action) in delta.vault().non_fungible().iter() { - let asset_update = match delta_action { - NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), - 
NonFungibleDeltaAction::Remove => None, - }; - assets.push((account_id, asset.vault_key(), asset_update)); - } - - (Some(account_after), storage, assets) + prepare_partial_account_update(conn, update, account_id, delta, block_num)? }, }; - if let Some(code) = full_account.as_ref().map(Account::code) { + // Insert account _code_ for full accounts (new account creation) + if let AccountStateForInsert::FullAccount(ref account) = account_state { + let code = account.code(); let code_value = AccountCodeRowInsert { code_commitment: code.commitment().to_bytes(), code: code.to_bytes(), @@ -1065,22 +1274,30 @@ pub(crate) fn upsert_accounts( .set(schema::accounts::is_latest.eq(false)) .execute(conn)?; - let account_value = AccountRowInsert { - account_id: account_id_bytes, - network_account_type: network_account_type.to_raw_sql(), - account_commitment: update.final_state_commitment().to_bytes(), - block_num: block_num_raw, - nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - code_commitment: full_account - .as_ref() - .map(|account| account.code().commitment().to_bytes()), - // Store only the header (slot metadata + map roots), not full storage with map contents - storage_header: full_account - .as_ref() - .map(|account| account.storage().to_header().to_bytes()), - vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), - is_latest: true, - created_at_block, + let account_value = match &account_state { + AccountStateForInsert::Private => AccountRowInsert::new_private( + account_id, + network_account_type, + update.final_state_commitment(), + block_num, + created_at_block, + ), + AccountStateForInsert::FullAccount(account) => AccountRowInsert::new_from_account( + account_id, + network_account_type, + update.final_state_commitment(), + block_num, + created_at_block, + account, + ), + AccountStateForInsert::PartialState(state) => AccountRowInsert::new_from_partial( + account_id, + network_account_type, + 
update.final_state_commitment(), + block_num, + created_at_block, + state, + ), }; diesel::insert_into(schema::accounts::table) @@ -1091,6 +1308,7 @@ pub(crate) fn upsert_accounts( .execute(conn)?; // insert pending storage map entries + // TODO consider batching for (acc_id, slot_name, key, value) in pending_storage_inserts { insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; } @@ -1105,25 +1323,6 @@ pub(crate) fn upsert_accounts( Ok(count) } -/// Deserializes account and applies account delta. -pub(crate) fn apply_delta( - mut account: Account, - delta: &AccountDelta, - final_state_commitment: &Word, -) -> crate::db::Result { - account.apply_delta(delta)?; - - let actual_commitment = account.commitment(); - if &actual_commitment != final_state_commitment { - return Err(DatabaseError::AccountCommitmentsMismatch { - calculated: actual_commitment, - expected: *final_state_commitment, - }); - } - - Ok(account) -} - #[derive(Insertable, Debug, Clone)] #[diesel(table_name = schema::account_codes)] pub(crate) struct AccountCodeRowInsert { @@ -1146,6 +1345,76 @@ pub(crate) struct AccountRowInsert { pub(crate) created_at_block: i64, } +impl AccountRowInsert { + /// Creates an insert row for a private account (no public state). + fn new_private( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: None, + code_commitment: None, + storage_header: None, + vault_root: None, + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } + + /// Creates an insert row from a full account (new account creation). 
+ fn new_from_account( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + account: &Account, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: Some(nonce_to_raw_sql(account.nonce())), + code_commitment: Some(account.code().commitment().to_bytes()), + storage_header: Some(account.storage().to_header().to_bytes()), + vault_root: Some(account.vault().root().to_bytes()), + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } + + /// Creates an insert row from a partial account state (delta update). + fn new_from_partial( + account_id: AccountId, + network_account_type: NetworkAccountType, + account_commitment: Word, + block_num: BlockNumber, + created_at_block: BlockNumber, + state: &PartialAccountState, + ) -> Self { + Self { + account_id: account_id.to_bytes(), + network_account_type: network_account_type.to_raw_sql(), + account_commitment: account_commitment.to_bytes(), + block_num: block_num.to_raw_sql(), + nonce: Some(nonce_to_raw_sql(state.nonce)), + code_commitment: Some(state.code_commitment.to_bytes()), + storage_header: Some(state.storage_header.to_bytes()), + vault_root: Some(state.vault_root.to_bytes()), + is_latest: true, + created_at_block: created_at_block.to_raw_sql(), + } + } +} + #[derive(Insertable, AsChangeset, Debug, Clone)] #[diesel(table_name = schema::account_vault_assets)] pub(crate) struct AccountAssetRowInsert { @@ -1189,3 +1458,78 @@ pub(crate) struct AccountStorageMapRowInsert { pub(crate) value: Vec, pub(crate) is_latest: bool, } + +// CLEANUP FUNCTIONS +// ================================================================================================ + +/// Number of historical blocks to retain for vault assets and storage map values. 
+/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be deleted, +/// except for entries marked with `is_latest=true` which are always retained. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + +/// Clean up old entries for all accounts, deleting entries older than the retention window. +/// +/// Deletes rows where `block_num < chain_tip - HISTORICAL_BLOCK_RETENTION` and `is_latest = false`. +/// This is a simple and efficient approach that doesn't require window functions. +/// +/// # Returns +/// A tuple of `(vault_assets_deleted, storage_map_values_deleted)` +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +pub(crate) fn prune_history( + conn: &mut SqliteConnection, + chain_tip: BlockNumber, +) -> Result<(usize, usize), DatabaseError> { + let cutoff_block = i64::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + tracing::Span::current().record("cutoff_block", cutoff_block); + let vault_deleted = prune_account_vault_assets(conn, cutoff_block)?; + let storage_deleted = prune_account_storage_map_values(conn, cutoff_block)?; + + Ok((vault_deleted, storage_deleted)) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_vault_assets( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_vault_assets::table.filter( + schema::account_vault_assets::block_num + .lt(cutoff_block) + .and(schema::account_vault_assets::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_storage_map_values( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_storage_map_values::table.filter( + schema::account_storage_map_values::block_num + .lt(cutoff_block) + 
.and(schema::account_storage_map_values::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} diff --git a/crates/store/src/db/models/queries/accounts/delta.rs b/crates/store/src/db/models/queries/accounts/delta.rs new file mode 100644 index 0000000000..8bab2b1220 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/delta.rs @@ -0,0 +1,244 @@ +//! Optimized delta update support for account updates. +//! +//! Provides functions and types for applying partial delta updates to accounts +//! without loading the full account state. Avoids loading: +//! - Full account code bytes +//! - All storage map entries +//! - All vault assets +//! +//! Instead, only the minimal data needed for the update is fetched. + +use std::collections::BTreeMap; + +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; +use miden_protocol::account::delta::AccountStorageDelta; +use miden_protocol::account::{ + Account, + AccountId, + AccountStorageHeader, + StorageMap, + StorageMapKey, + StorageSlotHeader, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{EMPTY_WORD, Felt, Word}; + +use crate::db::models::conv::raw_sql_to_nonce; +use crate::db::schema; +use crate::errors::DatabaseError; + +#[cfg(test)] +mod tests; + +// TYPES +// ================================================================================================ + +/// Raw row type for account state delta queries. +/// +/// Fields: (`nonce`, `code_commitment`, `storage_header`) +#[derive(diesel::prelude::Queryable)] +struct AccountStateDeltaRow { + nonce: Option, + code_commitment: Option>, + storage_header: Option>, +} + +/// Data needed for applying a delta update to an existing account. +/// Fetches only the minimal data required, avoiding loading full code and storage. 
+#[derive(Debug, Clone)] +pub(super) struct AccountStateHeadersForDelta { + pub nonce: Felt, + pub code_commitment: Word, + pub storage_header: AccountStorageHeader, +} + +/// Minimal account state computed from a partial delta update. +/// Contains only the fields needed for the accounts table row insert. +#[derive(Debug, Clone)] +pub(super) struct PartialAccountState { + pub nonce: Felt, + pub code_commitment: Word, + pub storage_header: AccountStorageHeader, + pub vault_root: Word, +} + +/// Represents the account state to be inserted, either from a full account +/// or from a partial delta update. +pub(super) enum AccountStateForInsert { + /// Private account - no public state stored + Private, + /// Full account state (from full-state delta, i.e., new account) + FullAccount(Account), + /// Partial account state (from partial delta, i.e., existing account update) + PartialState(PartialAccountState), +} + +// QUERIES +// ================================================================================================ + +/// Selects the minimal account state needed for applying a delta update. +/// +/// Optimized query that only fetches: +/// - `nonce` (to add `nonce_delta`) +/// - `code_commitment` (unchanged in partial deltas) +/// - `storage_header` (to apply storage delta) +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT nonce, code_commitment, storage_header +/// FROM accounts +/// WHERE account_id = ?1 AND is_latest = 1 +/// ``` +pub(super) fn select_minimal_account_state_headers( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + let row: AccountStateDeltaRow = SelectDsl::select( + schema::accounts::table, + ( + schema::accounts::nonce, + schema::accounts::code_commitment, + schema::accounts::storage_header, + ), + ) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) + .optional()? 
+ .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let nonce = raw_sql_to_nonce(row.nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code_commitment = row + .code_commitment + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .ok_or_else(|| { + DatabaseError::DataCorrupted(format!( + "No code_commitment found for account {account_id}" + )) + })?; + + let storage_header = match row.storage_header { + Some(bytes) => AccountStorageHeader::read_from_bytes(&bytes)?, + None => AccountStorageHeader::new(Vec::new())?, + }; + + Ok(AccountStateHeadersForDelta { nonce, code_commitment, storage_header }) +} + +/// Selects vault balances for specific faucet IDs. +/// +/// Optimized query that only fetches balances for the faucet IDs +/// that are being updated by a delta, rather than loading all vault assets. +/// +/// Returns a map from `faucet_id` to the current balance (0 if not found). +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT vault_key, asset +/// FROM account_vault_assets +/// WHERE account_id = ?1 AND is_latest = 1 AND vault_key IN (?2, ?3, ...) 
+/// ``` +pub(super) fn select_vault_balances_by_faucet_ids( + conn: &mut SqliteConnection, + account_id: AccountId, + faucet_ids: &[AccountId], +) -> Result, DatabaseError> { + use schema::account_vault_assets as vault; + + if faucet_ids.is_empty() { + return Ok(BTreeMap::new()); + } + + let account_id_bytes = account_id.to_bytes(); + + // Compute vault keys for each faucet ID + let vault_keys: Vec> = Result::from_iter(faucet_ids.iter().map(|faucet_id| { + let asset = FungibleAsset::new(*faucet_id, 0) + .map_err(|_| DatabaseError::DataCorrupted(format!("Invalid faucet id {faucet_id}")))?; + let key: Word = asset.vault_key().into(); + Ok::<_, DatabaseError>(key.to_bytes()) + }))?; + + let entries: Vec<(Vec, Option>)> = + SelectDsl::select(vault::table, (vault::vault_key, vault::asset)) + .filter(vault::account_id.eq(&account_id_bytes)) + .filter(vault::is_latest.eq(true)) + .filter(vault::vault_key.eq_any(&vault_keys)) + .load(conn)?; + + let mut balances = BTreeMap::from_iter(faucet_ids.iter().map(|faucet_id| (*faucet_id, 0))); + + for (_vault_key_bytes, maybe_asset_bytes) in entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let asset = Asset::read_from_bytes(&asset_bytes)?; + if let Asset::Fungible(fungible) = asset { + balances.insert(fungible.faucet_id(), fungible.amount()); + } + } + } + + Ok(balances) +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Applies storage delta to an existing storage header using precomputed map roots. +/// +/// For value slots, updates the slot value directly. +/// For map slots, uses the precomputed roots for updated maps. 
+pub(super) fn apply_storage_delta( + header: &AccountStorageHeader, + delta: &AccountStorageDelta, + map_entries: &BTreeMap<StorageSlotName, BTreeMap<Word, Word>>, +) -> Result<AccountStorageHeader, DatabaseError> { + let mut value_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); + let mut map_updates: BTreeMap<&StorageSlotName, Word> = BTreeMap::new(); + + for (slot_name, new_value) in delta.values() { + value_updates.insert(slot_name, *new_value); + } + + for (slot_name, map_delta) in delta.maps() { + if map_delta.is_empty() { + continue; + } + + let mut entries = map_entries.get(slot_name).cloned().unwrap_or_default(); + for (key, value) in map_delta.entries() { + if *value == EMPTY_WORD { + entries.remove(&(*key).into_inner()); + } else { + entries.insert((*key).into_inner(), *value); + } + } + + let storage_map = StorageMap::with_entries(entries.into_iter()) + .map_err(DatabaseError::StorageMapError)?; + map_updates.insert(slot_name, storage_map.root()); + } + + let slots = Vec::from_iter(header.slots().map(|slot| { + let slot_name = slot.name(); + if let Some(&new_value) = value_updates.get(slot_name) { + StorageSlotHeader::new(slot_name.clone(), slot.slot_type(), new_value) + } else if let Some(&new_root) = map_updates.get(slot_name) { + StorageSlotHeader::new(slot_name.clone(), slot.slot_type(), new_root) + } else { + slot.clone() + } + })); + + AccountStorageHeader::new(slots).map_err(|e| { + DatabaseError::DataCorrupted(format!("Failed to create storage header: {e:?}")) + }) +} diff --git a/crates/store/src/db/models/queries/accounts/delta/tests.rs b/crates/store/src/db/models/queries/accounts/delta/tests.rs new file mode 100644 index 0000000000..7f31003259 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/delta/tests.rs @@ -0,0 +1,692 @@ +//! +//! Tests for delta update functionality.
+ +use std::collections::BTreeMap; + +use assert_matches::assert_matches; +use diesel::{Connection, ExpressionMethods, QueryDsl, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; +use miden_protocol::account::component::AccountComponentMetadata; +use miden_protocol::account::delta::{ + AccountStorageDelta, + AccountUpdateDetails, + AccountVaultDelta, + StorageMapDelta, + StorageSlotDelta, +}; +use miden_protocol::account::{ + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountStorageMode, + AccountType, + StorageMap, + StorageMapKey, + StorageSlot, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, + ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET_1, +}; +use miden_protocol::utils::Serializable; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthSingleSig; +use miden_standards::code_builder::CodeBuilder; + +use crate::db::migrations::MIGRATIONS; +use crate::db::models::queries::accounts::{ + select_account_header_with_storage_header_at_block, + select_account_vault_at_block, + select_full_account, + upsert_accounts, +}; +use crate::db::schema::accounts; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let secret_key = SecretKey::new(); + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + 
Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + secret_key.public_key(), + test_fee_params(), + 0_u8.into(), + ); + let signature = secret_key.sign(block_header.commitment()); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +/// Tests that the optimized delta update path produces the same results as the old +/// method that loads the full account. +/// +/// Covers partial deltas that update: +/// - Nonce (via `nonce_delta`) +/// - Value storage slots +/// - Vault assets (fungible) starting from empty vault +/// +/// The test ensures the optimized code path in `upsert_accounts` produces correct results +/// by comparing the final account state against a manually constructed expected state. +#[test] +#[expect( + clippy::too_many_lines, + reason = "test exercises multiple storage and vault paths" +)] +fn optimized_delta_matches_full_account_method() { + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_SEED: [u8; 32] = [10u8; 32]; + // Use fixed block numbers to ensure deterministic ordering. + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + // Use explicit slot indices to avoid magic numbers. + const SLOT_INDEX_PRIMARY: usize = 0; + const SLOT_INDEX_SECONDARY: usize = 1; + // Use fixed values to verify storage delta updates. + const INITIAL_SLOT_VALUES: [u64; 4] = [100, 200, 300, 400]; + const UPDATED_SLOT_VALUES: [u64; 4] = [111, 222, 333, 444]; + // Use fixed delta values to validate nonce and vault changes. 
+ const NONCE_DELTA: u64 = 5; + const VAULT_AMOUNT: u64 = 500; + + let mut conn = setup_test_db(); + + // Create an account with value slots only (no map slots to avoid SmtForest complexity) + let slot_value_initial = Word::from([ + Felt::new(INITIAL_SLOT_VALUES[0]), + Felt::new(INITIAL_SLOT_VALUES[1]), + Felt::new(INITIAL_SLOT_VALUES[2]), + Felt::new(INITIAL_SLOT_VALUES[3]), + ]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX_PRIMARY), slot_value_initial), + StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX_SECONDARY), EMPTY_WORD), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + // Insert the initial account at block 1 (full state) - no vault assets + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + // Verify initial state + let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full 
account"); + assert_eq!(full_account_before.nonce(), account.nonce()); + assert!( + full_account_before.vault().assets().next().is_none(), + "Vault should be empty initially" + ); + + // Create a partial delta to apply: + // - Increment nonce by 5 + // - Update the first value slot + // - Add 500 tokens to the vault (starting from empty) + + let new_slot_value = Word::from([ + Felt::new(UPDATED_SLOT_VALUES[0]), + Felt::new(UPDATED_SLOT_VALUES[1]), + Felt::new(UPDATED_SLOT_VALUES[2]), + Felt::new(UPDATED_SLOT_VALUES[3]), + ]); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Find the slot name from the account's storage + let value_slot_name = + full_account_before.storage().slots().iter().next().unwrap().name().clone(); + + // Build the storage delta (value slot update only) + let storage_delta = { + let deltas = BTreeMap::from_iter([( + value_slot_name.clone(), + StorageSlotDelta::Value(new_slot_value), + )]); + AccountStorageDelta::from_raw(deltas) + }; + + // Build the vault delta (add 500 tokens to empty vault) + let vault_delta = { + let mut delta = AccountVaultDelta::default(); + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, VAULT_AMOUNT).unwrap()); + delta.add_asset(asset).unwrap(); + delta + }; + + // Create a partial delta + let nonce_delta = Felt::new(NONCE_DELTA); + let partial_delta = AccountDelta::new( + full_account_before.id(), + storage_delta.clone(), + vault_delta.clone(), + nonce_delta, + ) + .unwrap(); + assert!(!partial_delta.is_full_state(), "Delta should be partial, not full state"); + + // Construct the expected final account by applying the delta + let expected_nonce = Felt::new(full_account_before.nonce().as_int() + nonce_delta.as_int()); + let expected_code_commitment = full_account_before.code().commitment(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let final_account_for_commitment = expected_account; + + let 
final_commitment = final_account_for_commitment.to_commitment(); + let expected_storage_commitment = final_account_for_commitment.storage().to_commitment(); + let expected_vault_root = final_account_for_commitment.vault().root(); + + // ----- Apply the partial delta via upsert_accounts (optimized path) ----- + let account_update = BlockAccountUpdate::new( + account.id(), + final_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + // ----- VERIFY: Query the DB and check that optimized path produced correct results ----- + + let (header_after, storage_header_after) = + select_account_header_with_storage_header_at_block(&mut conn, account.id(), block_2) + .expect("Query should succeed") + .expect("Account should exist"); + + // Verify nonce + assert_eq!( + header_after.nonce(), + expected_nonce, + "Nonce mismatch: optimized={:?}, expected={:?}", + header_after.nonce(), + expected_nonce + ); + + // Verify code commitment (should be unchanged) + assert_eq!( + header_after.code_commitment(), + expected_code_commitment, + "Code commitment mismatch" + ); + + // Verify storage header commitment + assert_eq!( + storage_header_after.to_commitment(), + expected_storage_commitment, + "Storage header commitment mismatch" + ); + + // Verify vault assets + let vault_assets_after = select_account_vault_at_block(&mut conn, account.id(), block_2) + .expect("Query vault should succeed"); + + assert_eq!(vault_assets_after.len(), 1, "Should have 1 vault asset"); + assert_matches!(&vault_assets_after[0], Asset::Fungible(f) => { + assert_eq!(f.faucet_id(), faucet_id, "Faucet ID should match"); + assert_eq!(f.amount(), VAULT_AMOUNT, "Amount should be 500"); + }); + + // Verify the account commitment matches + assert_eq!( + header_after.to_commitment(), + final_commitment, + "Account commitment should match the expected final state" + ); + + // Also verify we can load the full 
account and it has correct state + let full_account_after = select_full_account(&mut conn, account.id()) + .expect("Failed to load full account after update"); + + assert_eq!(full_account_after.nonce(), expected_nonce, "Full account nonce mismatch"); + assert_eq!( + full_account_after.storage().to_commitment(), + expected_storage_commitment, + "Full account storage commitment mismatch" + ); + assert_eq!( + full_account_after.vault().root(), + expected_vault_root, + "Full account vault root mismatch" + ); +} + +#[test] +fn optimized_delta_updates_non_empty_vault() { + const ACCOUNT_SEED: [u8; 32] = [40u8; 32]; + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + const NONCE_DELTA: u64 = 1; + const INITIAL_AMOUNT: u64 = 700; + const ADDED_AMOUNT: u64 = 250; + const SLOT_INDEX: usize = 0; + + let mut conn = setup_test_db(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let faucet_id_1 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET_1).unwrap(); + let initial_asset = Asset::Fungible(FungibleAsset::new(faucet_id, INITIAL_AMOUNT).unwrap()); + + let component_storage = + vec![StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX), EMPTY_WORD)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc vault push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .with_assets([initial_asset]) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let 
block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full account"); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(Asset::Fungible(FungibleAsset::new(faucet_id_1, ADDED_AMOUNT).unwrap())) + .unwrap(); + vault_delta + .remove_asset(Asset::Fungible(FungibleAsset::new(faucet_id, INITIAL_AMOUNT).unwrap())) + .unwrap(); + + let partial_delta = AccountDelta::new( + account.id(), + AccountStorageDelta::new(), + vault_delta, + Felt::new(NONCE_DELTA), + ) + .unwrap(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + let expected_vault_root = expected_account.vault().root(); + + let account_update = BlockAccountUpdate::new( + account.id(), + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + let vault_assets_after = select_account_vault_at_block(&mut conn, account.id(), block_2) + .expect("Query vault should succeed"); + + assert_eq!(vault_assets_after.len(), 1, "Should have 1 vault asset"); + assert_matches!(&vault_assets_after[0], Asset::Fungible(f) => { + assert_eq!(f.faucet_id(), faucet_id_1, "Faucet ID should match"); + assert_eq!(f.amount(), ADDED_AMOUNT, "Amount should match"); + }); + + let full_account_after = select_full_account(&mut conn, account.id()) + .expect("Failed to load full account 
after update"); + + assert_eq!(full_account_after.vault().root(), expected_vault_root); + assert_eq!(full_account_after.to_commitment(), expected_commitment); +} + +#[test] +fn optimized_delta_updates_storage_map_header() { + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_SEED: [u8; 32] = [30u8; 32]; + // Use fixed block numbers to ensure deterministic ordering. + const BLOCK_NUM_1: u32 = 1; + const BLOCK_NUM_2: u32 = 2; + // Use explicit slot index to avoid magic numbers. + const SLOT_INDEX_MAP: usize = 3; + // Use fixed map values to validate root updates. + const MAP_KEY_VALUES: [u64; 4] = [7, 0, 0, 0]; + const MAP_VALUE_INITIAL: [u64; 4] = [10, 20, 30, 40]; + const MAP_VALUE_UPDATED: [u64; 4] = [50, 60, 70, 80]; + // Use nonzero nonce delta (required when storage/vault changes). + const NONCE_DELTA: u64 = 1; + + let mut conn = setup_test_db(); + + let map_key = StorageMapKey::new(Word::from([ + Felt::new(MAP_KEY_VALUES[0]), + Felt::new(MAP_KEY_VALUES[1]), + Felt::new(MAP_KEY_VALUES[2]), + Felt::new(MAP_KEY_VALUES[3]), + ])); + let map_value_initial = Word::from([ + Felt::new(MAP_VALUE_INITIAL[0]), + Felt::new(MAP_VALUE_INITIAL[1]), + Felt::new(MAP_VALUE_INITIAL[2]), + Felt::new(MAP_VALUE_INITIAL[3]), + ]); + let map_value_updated = Word::from([ + Felt::new(MAP_VALUE_UPDATED[0]), + Felt::new(MAP_VALUE_UPDATED[1]), + Felt::new(MAP_VALUE_UPDATED[2]), + Felt::new(MAP_VALUE_UPDATED[3]), + ]); + + let storage_map = StorageMap::with_entries(vec![(map_key, map_value_initial)]).unwrap(); + let component_storage = + vec![StorageSlot::with_map(StorageSlotName::mock(SLOT_INDEX_MAP), storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + 
.unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let block_1 = BlockNumber::from(BLOCK_NUM_1); + let block_2 = BlockNumber::from(BLOCK_NUM_2); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let delta_initial = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_initial = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta_initial), + ); + upsert_accounts(&mut conn, &[account_update_initial], block_1).expect("Initial upsert failed"); + + let full_account_before = + select_full_account(&mut conn, account.id()).expect("Failed to load full account"); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(map_key, map_value_updated); + let storage_delta = AccountStorageDelta::from_raw(BTreeMap::from_iter([( + StorageSlotName::mock(SLOT_INDEX_MAP), + StorageSlotDelta::Map(map_delta), + )])); + + let partial_delta = AccountDelta::new( + account.id(), + storage_delta, + AccountVaultDelta::default(), + Felt::new(NONCE_DELTA), + ) + .unwrap(); + + let mut expected_account = full_account_before.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + let expected_storage_commitment = expected_account.storage().to_commitment(); + + let account_update = BlockAccountUpdate::new( + account.id(), + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + upsert_accounts(&mut conn, &[account_update], block_2).expect("Partial delta upsert failed"); + + let (header_after, storage_header_after) = + select_account_header_with_storage_header_at_block(&mut conn, account.id(), 
block_2) + .expect("Query should succeed") + .expect("Account should exist"); + + assert_eq!( + storage_header_after.to_commitment(), + expected_storage_commitment, + "Storage commitment should match after map delta" + ); + assert_eq!( + header_after.to_commitment(), + expected_commitment, + "Account commitment should match after map delta" + ); +} + +/// Tests that a private account update (no public state) is handled correctly. +/// +/// Private accounts store only the account commitment, not the full state. +#[test] +fn upsert_private_account() { + use miden_protocol::account::{AccountIdVersion, AccountStorageMode, AccountType}; + + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_ID_SEED: [u8; 15] = [20u8; 15]; + // Use fixed block number to keep test ordering deterministic. + const BLOCK_NUM: u32 = 1; + // Use fixed commitment values to validate storage behavior. + const COMMITMENT_WORDS: [u64; 4] = [1, 2, 3, 4]; + + let mut conn = setup_test_db(); + + let block_num = BlockNumber::from(BLOCK_NUM); + insert_block_header(&mut conn, block_num); + + // Create a private account ID + let account_id = AccountId::dummy( + ACCOUNT_ID_SEED, + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Private, + ); + + let account_commitment = Word::from([ + Felt::new(COMMITMENT_WORDS[0]), + Felt::new(COMMITMENT_WORDS[1]), + Felt::new(COMMITMENT_WORDS[2]), + Felt::new(COMMITMENT_WORDS[3]), + ]); + + // Insert as private account + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Private); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Private account upsert failed"); + + // Verify the account exists and commitment matches + + let (stored_commitment, stored_nonce, stored_code): (Vec<u8>, Option<i64>, Option<Vec<u8>>) = + accounts::table + .filter(accounts::account_id.eq(account_id.to_bytes())) + .filter(accounts::is_latest.eq(true)) +
.select((accounts::account_commitment, accounts::nonce, accounts::code_commitment)) + .first(&mut conn) + .expect("Account should exist in DB"); + + assert_eq!( + stored_commitment, + account_commitment.to_bytes(), + "Stored commitment should match" + ); + + // Private accounts have NULL for nonce, code_commitment, storage_header, vault_root + assert!(stored_nonce.is_none(), "Private account should have NULL nonce"); + assert!(stored_code.is_none(), "Private account should have NULL code_commitment"); +} + +/// Tests that a full-state delta (new account creation) is handled correctly. +/// +/// Full-state deltas contain the complete account state including code. +#[test] +fn upsert_full_state_delta() { + // Use deterministic account seed to keep account IDs stable. + const ACCOUNT_SEED: [u8; 32] = [20u8; 32]; + // Use fixed block number to keep test ordering deterministic. + const BLOCK_NUM: u32 = 1; + // Use fixed slot values to validate storage behavior. + const SLOT_VALUES: [u64; 4] = [10, 20, 30, 40]; + // Use explicit slot index to avoid magic numbers. 
+ const SLOT_INDEX: usize = 0; + + let mut conn = setup_test_db(); + + let block_num = BlockNumber::from(BLOCK_NUM); + insert_block_header(&mut conn, block_num); + + // Create an account with storage + let slot_value = Word::from([ + Felt::new(SLOT_VALUES[0]), + Felt::new(SLOT_VALUES[1]), + Felt::new(SLOT_VALUES[2]), + Felt::new(SLOT_VALUES[3]), + ]); + let component_storage = + vec![StorageSlot::with_value(StorageSlotName::mock(SLOT_INDEX), slot_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc bar push.2 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new(ACCOUNT_SEED) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + // Create a full-state delta from the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = BlockAccountUpdate::new( + account.id(), + account.to_commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Full-state delta upsert failed"); + + // Verify the account state was stored correctly + let (header, storage_header) = + select_account_header_with_storage_header_at_block(&mut conn, account.id(), block_num) + .expect("Query should succeed") + .expect("Account should exist"); + + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); + 
assert_eq!( + storage_header.to_commitment(), + account.storage().to_commitment(), + "Storage commitment should match" + ); + + // Verify we can load the full account back + let loaded_account = + select_full_account(&mut conn, account.id()).expect("Should load full account"); + + assert_eq!(loaded_account.nonce(), account.nonce()); + assert_eq!(loaded_account.code().commitment(), account.code().commitment()); + assert_eq!(loaded_account.storage().to_commitment(), account.storage().to_commitment()); +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 790c4519ed..572cab258c 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -13,7 +13,8 @@ use diesel::{ }; use diesel_migrations::MigrationHarness; use miden_node_utils::fee::test_fee_params; -use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -23,19 +24,25 @@ use miden_protocol::account::{ AccountId, AccountIdVersion, AccountStorage, + AccountStorageDelta, AccountStorageHeader, AccountStorageMode, AccountType, + AccountVaultDelta, StorageMap, + StorageMapDelta, + StorageMapKey, StorageSlot, + StorageSlotContent, + StorageSlotDelta, StorageSlotName, StorageSlotType, }; use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{EMPTY_WORD, Felt, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; use 
super::*; @@ -92,18 +99,19 @@ fn reconstruct_account_storage_at_block( .load(conn)?; // For each (slot_name, key) pair, keep only the latest entry - let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + let mut latest_map_entries: BTreeMap<(StorageSlotName, StorageMapKey), Word> = BTreeMap::new(); for (_, slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; - let key = Word::read_from_bytes(&key_bytes)?; + let key = StorageMapKey::read_from_bytes(&key_bytes)?; let value = Word::read_from_bytes(&value_bytes)?; latest_map_entries.entry((slot_name, key)).or_insert(value); } // Group entries by slot name - let mut map_entries_by_slot: BTreeMap<StorageSlotName, Vec<(Word, Word)>> = BTreeMap::new(); + let mut map_entries_by_slot: BTreeMap<StorageSlotName, Vec<(StorageMapKey, Word)>> = + BTreeMap::new(); for ((slot_name, key), value) in latest_map_entries { map_entries_by_slot.entry(slot_name).or_default().push((key, value)); } @@ -143,15 +151,22 @@ fn create_test_account_with_storage() -> (Account, AccountId) { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -161,6 +176,7
@@ fn create_test_account_with_storage() -> (Account, AccountId) { fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { use crate::db::schema::block_headers; + let secret_key = SecretKey::new(); let block_header = BlockHeader::new( 1_u8.into(), Word::default(), @@ -171,20 +187,73 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { Word::default(), Word::default(), Word::default(), - SecretKey::new().public_key(), + secret_key.public_key(), test_fee_params(), 0_u8.into(), ); + let signature = secret_key.sign(block_header.commitment()); diesel::insert_into(block_headers::table) .values(( block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); } +fn create_account_with_map_storage( + slot_name: StorageSlotName, + entries: Vec<(StorageMapKey, Word)>, +) -> Account { + let storage_map = StorageMap::with_entries(entries).unwrap(); + let component_storage = vec![StorageSlot::with_map(slot_name, storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + AccountBuilder::new([9u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap() +} + +fn assert_storage_map_slot_entries( + storage: &AccountStorage, + slot_name: &StorageSlotName, + expected: 
&BTreeMap, +) { + let slot = storage + .slots() + .iter() + .find(|slot| slot.name() == slot_name) + .expect("expected storage slot"); + + let StorageSlotContent::Map(storage_map) = slot.content() else { + panic!("expected map slot"); + }; + + let entries = BTreeMap::from_iter(storage_map.entries().map(|(key, value)| (*key, *value))); + assert_eq!(&entries, expected, "map entries mismatch"); +} + // ACCOUNT HEADER AT BLOCK TESTS // ================================================================================================ @@ -222,7 +291,7 @@ fn test_select_account_header_at_block_returns_correct_header() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -259,7 +328,7 @@ fn test_select_account_header_at_block_historical_query() { let delta_1 = AccountDelta::try_from(account.clone()).unwrap(); let account_update_1 = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta_1), ); @@ -298,7 +367,7 @@ fn test_select_account_vault_at_block_empty() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -325,7 +394,7 @@ fn test_upsert_accounts_inserts_storage_header() { let storage_commitment_original = account.storage().to_commitment(); let storage_slots_len = account.storage().slots().len(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); // Create full state delta from the account let delta = AccountDelta::try_from(account).unwrap(); @@ -379,7 +448,7 @@ fn test_upsert_accounts_updates_is_latest_flag() { // Save storage commitment before moving account let storage_commitment_1 = account.storage().to_commitment(); - let 
account_commitment_1 = account.commitment(); + let account_commitment_1 = account.to_commitment(); // First update with original account - full state delta let delta_1 = AccountDelta::try_from(account).unwrap(); @@ -402,20 +471,27 @@ fn test_upsert_accounts_updates_is_latest_flag() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component_2 = AccountComponent::new(account_component_code, component_storage_modified) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component_2 = AccountComponent::new( + account_component_code, + component_storage_modified, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account_2 = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component_2) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); let storage_commitment_2 = account_2.storage().to_commitment(); - let account_commitment_2 = account_2.commitment(); + let account_commitment_2 = account_2.to_commitment(); // Second update with modified account - full state delta let delta_2 = AccountDelta::try_from(account_2).unwrap(); @@ -495,15 +571,22 @@ fn test_upsert_accounts_with_multiple_storage_slots() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = 
AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -511,7 +594,7 @@ fn test_upsert_accounts_with_multiple_storage_slots() { insert_block_header(&mut conn, block_num); let storage_commitment = account.storage().to_commitment(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); let delta = AccountDelta::try_from(account).unwrap(); let account_update = @@ -530,11 +613,12 @@ fn test_upsert_accounts_with_multiple_storage_slots() { "Storage commitment mismatch" ); - // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + // Note: AuthSingleSig adds 2 storage slots (pub key + scheme id), so 3 component slots + 2 auth + // = 5 total assert_eq!( queried_storage.slots().len(), - 4, - "Expected 4 storage slots (3 component + 1 auth)" + 5, + "Expected 5 storage slots (3 component + 2 auth)" ); // The storage commitment matching proves that all values are correctly preserved. 
@@ -557,15 +641,22 @@ fn test_upsert_accounts_with_empty_storage() { .compile_component_code("test::interface", "pub proc foo push.1 end") .unwrap(); - let component = AccountComponent::new(account_component_code, vec![]) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component = AccountComponent::new( + account_component_code, + vec![], + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); let account = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -573,7 +664,7 @@ fn test_upsert_accounts_with_empty_storage() { insert_block_header(&mut conn, block_num); let storage_commitment = account.storage().to_commitment(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); let delta = AccountDelta::try_from(account).unwrap(); let account_update = @@ -592,8 +683,8 @@ fn test_upsert_accounts_with_empty_storage() { "Storage commitment mismatch for empty storage" ); - // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot - assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)"); + // Note: AuthSingleSig adds 2 storage slots (pub key + scheme id) + assert_eq!(queried_storage.slots().len(), 2, "Expected 2 storage slots (auth component)"); // Verify the storage header blob exists in database let storage_header_exists: Option = SelectDsl::select( @@ -613,6 +704,174 @@ fn test_upsert_accounts_with_empty_storage() { ); } +// STORAGE MAP LATEST ACCOUNT QUERY TESTS +// 
================================================================================================ + +#[test] +fn test_select_latest_account_storage_ordering_semantics() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let slot_name = StorageSlotName::mock(0); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); + let key_3 = StorageMapKey::from_index(3); + + let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_3 = Word::from([Felt::new(30), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let mut entries = vec![(key_2, value_2), (key_1, value_1), (key_3, value_3)]; + entries.reverse(); + + let account = create_account_with_map_storage(slot_name.clone(), entries.clone()); + let account_id = account.id(); + let account_commitment = account.to_commitment(); + + let mut reversed_entries = entries.clone(); + reversed_entries.reverse(); + let reordered_account = create_account_with_map_storage(slot_name.clone(), reversed_entries); + assert_eq!( + account.storage().to_commitment(), + reordered_account.storage().to_commitment(), + "storage commitments should be order-independent" + ); + + let delta = AccountDelta::try_from(account).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + let storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected = BTreeMap::from_iter(entries); + assert_storage_map_slot_entries(&storage, &slot_name, &expected); +} + +#[test] +fn test_select_latest_account_storage_multiple_slots() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut 
conn, block_num); + + let slot_name_1 = StorageSlotName::mock(0); + let slot_name_2 = StorageSlotName::mock(1); + + let key_a = StorageMapKey::from_index(1); + let key_b = StorageMapKey::from_index(2); + + let value_a = Word::from([Felt::new(11), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_b = Word::from([Felt::new(22), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let map_a = StorageMap::with_entries(vec![(key_a, value_a)]).unwrap(); + let map_b = StorageMap::with_entries(vec![(key_b, value_b)]).unwrap(); + + let component_storage = vec![ + StorageSlot::with_map(slot_name_2.clone(), map_b), + StorageSlot::with_map(slot_name_1.clone(), map_a), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc map push.1 end") + .unwrap(); + + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountImmutableCode), + ) + .unwrap(); + + let account = AccountBuilder::new([9u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let account_commitment = account.to_commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + let storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected_slot_1 = BTreeMap::from_iter([(key_a, value_a)]); + let expected_slot_2 = BTreeMap::from_iter([(key_b, value_b)]); + + assert_storage_map_slot_entries(&storage, &slot_name_1, 
&expected_slot_1); + assert_storage_map_slot_entries(&storage, &slot_name_2, &expected_slot_2); +} + +#[test] +fn test_select_latest_account_storage_slot_updates() { + let mut conn = setup_test_db(); + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + + let slot_name = StorageSlotName::mock(0); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); + + let value_1 = Word::from([Felt::new(10), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_2 = Word::from([Felt::new(20), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + let value_3 = Word::from([Felt::new(30), Felt::ZERO, Felt::ZERO, Felt::ZERO]); + + let account = create_account_with_map_storage(slot_name.clone(), vec![(key_1, value_1)]); + let account_id = account.id(); + let account_commitment = account.to_commitment(); + + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key_1, value_2); + map_delta.insert(key_2, value_3); + let storage_delta = AccountStorageDelta::from_raw(BTreeMap::from_iter([( + slot_name.clone(), + StorageSlotDelta::Map(map_delta), + )])); + + let partial_delta = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::new(1)) + .unwrap(); + + let mut expected_account = account.clone(); + expected_account.apply_delta(&partial_delta).unwrap(); + let expected_commitment = expected_account.to_commitment(); + + let account_update = BlockAccountUpdate::new( + account_id, + expected_commitment, + AccountUpdateDetails::Delta(partial_delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_2).expect("upsert_accounts failed"); 
+ + let storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + let expected = BTreeMap::from_iter([(key_1, value_2), (key_2, value_3)]); + assert_storage_map_slot_entries(&storage, &slot_name, &expected); +} + // VAULT AT BLOCK HISTORICAL QUERY TESTS // ================================================================================================ @@ -646,7 +905,7 @@ fn test_select_account_vault_at_block_historical_with_updates() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -655,34 +914,12 @@ fn test_select_account_vault_at_block_historical_with_updates() { .expect("upsert_accounts failed"); } - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_2, - ) - .expect("upsert_accounts block 2 failed"); - - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_3, - ) - .expect("upsert_accounts block 3 failed"); - // Insert vault asset at block 1: vault_key_1 = 1000 tokens let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset_v1 = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); @@ -697,9 +934,9 @@ fn test_select_account_vault_at_block_historical_with_updates() { // Add a second vault_key at block 2 let vault_key_2 = AssetVaultKey::new_unchecked(Word::from([ Felt::new(2), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset_key2 = Asset::Fungible(FungibleAsset::new(faucet_id, 500).unwrap()); insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_2, Some(asset_key2)) @@ -747,6 
+984,66 @@ fn test_select_account_vault_at_block_historical_with_updates() { assert!(amounts.contains(&500), "Block 3 should have vault_key_2 with 500 tokens"); } +/// Tests that a 5-block history returns the correct asset per block. +#[test] +fn test_select_account_vault_at_block_exponential_updates() { + const BLOCK_COUNT: u32 = 5; + + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let blocks: Vec = (0..BLOCK_COUNT).map(BlockNumber::from).collect(); + + for block in &blocks { + insert_block_header(&mut conn, *block); + } + + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.to_commitment(), + AccountUpdateDetails::Delta(delta), + ); + + for block in &blocks { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), *block) + .expect("upsert_accounts failed"); + } + + let vault_key = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(3), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, + ])); + + for (index, block) in blocks.iter().enumerate() { + let amount = 1u64 << index; + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, amount).unwrap()); + insert_account_vault_asset(&mut conn, account_id, *block, vault_key, Some(asset)) + .expect("insert vault asset failed"); + } + + for (index, block) in blocks.iter().enumerate() { + let assets_at_block = select_account_vault_at_block(&mut conn, account_id, *block) + .expect("Query at block should succeed"); + + assert_eq!(assets_at_block.len(), 1, "Should have 1 asset at block"); + let expected_amount = 1u64 << index; + assert_matches!( + &assets_at_block[0], + Asset::Fungible(f) if 
f.amount() == expected_amount + ); + } +} + /// Tests that deleted vault assets (asset = None) are correctly excluded from results, /// and that the deduplication handles deletion entries properly. #[test] @@ -774,7 +1071,7 @@ fn test_select_account_vault_at_block_with_deletion() { let delta = AccountDelta::try_from(account.clone()).unwrap(); let account_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(delta), ); @@ -783,34 +1080,12 @@ fn test_select_account_vault_at_block_with_deletion() { .expect("upsert_accounts failed"); } - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_2, - ) - .expect("upsert_accounts block 2 failed"); - - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_3, - ) - .expect("upsert_accounts block 3 failed"); - // Insert vault asset at block 1 let vault_key = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), - Felt::new(0), - Felt::new(0), - Felt::new(0), + Felt::ZERO, + Felt::ZERO, + Felt::ZERO, ])); let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 3c295c72b8..bfcd34ee7a 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,8 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::Word; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; @@ -124,34 +126,93 @@ pub fn select_all_block_headers( vec_raw_try_into(raw_block_headers) } +/// Select 
all block headers from the DB using the given [`SqliteConnection`]. +/// +/// # Returns +/// +/// A vector of [`BlockHeader`] or an error. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT commitment +/// FROM block_headers +/// ORDER BY block_num ASC +/// ``` +pub fn select_all_block_header_commitments( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + let raw_commitments = + QueryDsl::select(schema::block_headers::table, schema::block_headers::commitment) + .order(schema::block_headers::block_num.asc()) + .load::>(conn)?; + let commitments = + Result::from_iter(raw_commitments.into_iter().map(BlockHeaderCommitment::from_raw_sql))?; + Ok(commitments) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(transparent)] +pub struct BlockHeaderCommitment(pub(crate) Word); + +impl BlockHeaderCommitment { + pub fn new(header: &BlockHeader) -> Self { + Self(header.commitment()) + } + pub fn word(self) -> Word { + self.0 + } +} + #[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderRawRow { - #[allow(dead_code)] + #[expect(dead_code)] pub block_num: i64, pub block_header: Vec, + pub signature: Vec, + pub commitment: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; fn try_into(self) -> Result { - let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let block_header = BlockHeader::from_raw_sql(self.block_header)?; + // we're bust if this invariant doesn't hold + debug_assert_eq!( + BlockHeaderCommitment::new(&block_header), + BlockHeaderCommitment::from_raw_sql(self.commitment) + .expect("Database always contains valid format commitments") + ); Ok(block_header) } } +impl TryInto<(BlockHeader, Signature)> for BlockHeaderRawRow { + type Error = DatabaseError; + fn try_into(self) -> Result<(BlockHeader, Signature), Self::Error> { + let block_header = 
BlockHeader::read_from_bytes(&self.block_header[..])?; + let signature = Signature::read_from_bytes(&self.signature[..])?; + Ok((block_header, signature)) + } +} + #[derive(Debug, Clone, Insertable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, + pub signature: Vec, + pub commitment: Vec, } -impl From<&BlockHeader> for BlockHeaderInsert { - fn from(block_header: &BlockHeader) -> Self { +impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { + fn from((header, signature): (&BlockHeader, &Signature)) -> Self { Self { - block_num: block_header.block_num().to_raw_sql(), - block_header: block_header.to_bytes(), + block_num: header.block_num().to_raw_sql(), + block_header: header.to_bytes(), + signature: signature.to_bytes(), + commitment: BlockHeaderCommitment::new(header).to_raw_sql(), } } } @@ -174,8 +235,9 @@ impl From<&BlockHeader> for BlockHeaderInsert { pub(crate) fn insert_block_header( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, ) -> Result { - let block_header = BlockHeaderInsert::from(block_header); + let block_header = BlockHeaderInsert::from((block_header, signature)); let count = diesel::insert_into(schema::block_headers::table) .values(&[block_header]) .execute(conn)?; diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0f29b00157..ad2010f840 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -25,20 +25,14 @@ //! transaction, any nesting of further `transaction(conn, || {})` has no effect and should be //! considered unnecessary boilerplate by default. 
-#![allow( - clippy::needless_pass_by_value, - reason = "The parent scope does own it, passing by value avoids additional boilerplate" -)] - use diesel::SqliteConnection; -use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::OrderedTransactionHeaders; use super::DatabaseError; -use crate::db::{NoteRecord, StateSyncUpdate}; -use crate::errors::StateSyncError; +use crate::db::NoteRecord; mod transactions; pub use transactions::*; @@ -47,18 +41,22 @@ pub use block_headers::*; mod accounts; pub use accounts::*; mod nullifiers; +pub use nullifiers::NullifiersPage; pub(crate) use nullifiers::*; mod notes; pub(crate) use notes::*; /// Apply a new block to the state /// +/// # Arguments +/// /// # Returns /// /// Number of records inserted and/or updated. pub(crate) fn apply_block( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, notes: &[(NoteRecord, Option)], nullifiers: &[Nullifier], accounts: &[BlockAccountUpdate], @@ -66,7 +64,7 @@ pub(crate) fn apply_block( ) -> Result { let mut count = 0; // Note: ordering here is important as the relevant tables have FK dependencies. - count += insert_block_header(conn, block_header)?; + count += insert_block_header(conn, block_header, signature)?; count += upsert_accounts(conn, accounts, block_header.block_num())?; count += insert_scripts(conn, notes.iter().map(|(note, _)| note))?; count += insert_notes(conn, notes)?; @@ -74,52 +72,3 @@ pub(crate) fn apply_block( count += insert_nullifiers_for_block(conn, nullifiers, block_header.block_num())?; Ok(count) } - -/// Loads the state necessary for a state sync -/// -/// The state sync covers from `from_start_block` until the last block that has a note matching the -/// given `note_tags`. 
-pub(crate) fn get_state_sync( - conn: &mut SqliteConnection, - from_start_block: BlockNumber, - account_ids: Vec, - note_tags: Vec, -) -> Result { - let chain_tip = select_block_header_by_block_num(conn, None)? - .expect("Chain tip is not found") - .block_num(); - - // Sync notes from the starting block to the latest in the chain. - let block_range = from_start_block..=chain_tip; - - // select notes since block by tag and sender - let (notes, _) = select_notes_since_block_by_tag_and_sender( - conn, - &account_ids[..], - ¬e_tags[..], - block_range, - )?; - - // select block header by block num - let maybe_note_block_num = notes.first().map(|note| note.block_num); - let block_header: BlockHeader = select_block_header_by_block_num(conn, maybe_note_block_num)? - .ok_or_else(|| StateSyncError::EmptyBlockHeadersTable)?; - - // select accounts by block range - let to_end_block = block_header.block_num(); - let account_updates = - select_accounts_by_block_range(conn, &account_ids, from_start_block..=to_end_block)?; - - // select transactions by accounts and block range - let transactions = select_transactions_by_accounts_and_block_range( - conn, - &account_ids, - from_start_block..=to_end_block, - )?; - Ok(StateSyncUpdate { - notes, - block_header, - account_updates, - transactions, - }) -} diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a2ab7b1bb0..49bdce4198 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -1,4 +1,4 @@ -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -41,10 +41,10 @@ use miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, Nullifier, @@ -203,7 +203,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.tag, /// notes.attachment, 
/// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -283,7 +283,7 @@ pub(crate) fn select_existing_note_commitments( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -427,7 +427,7 @@ pub(crate) fn select_note_script_by_root( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script, @@ -441,14 +441,7 @@ pub(crate) fn select_note_script_by_root( /// ORDER BY notes.rowid ASC /// LIMIT ?4 /// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -#[allow( - clippy::too_many_lines, - reason = "Lines will be reduced when schema is updated to simplify logic" -)] +#[expect(clippy::cast_sign_loss, reason = "row_id is a positive integer")] pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, account_id: AccountId, @@ -460,7 +453,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( diesel::dsl::sql::("notes.rowid >= ") .bind::(page.token.unwrap_or_default() as i64); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -470,7 +463,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( i64, // rowid (from sql::("notes.rowid")) ); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -550,7 +543,6 @@ pub struct NoteSyncRecordRawRow { pub inclusion_path: Vec, // SparseMerklePath } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for NoteSyncRecordRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -575,7 +567,7 @@ impl TryInto for 
NoteSyncRecordRawRow { #[diesel(check_for_backend(Sqlite))] pub struct NoteDetailsRawRow { pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, } @@ -601,7 +593,7 @@ pub struct NoteRecordWithScriptRawJoined { // #[diesel(embed)] // pub metadata: NoteMetadataRaw, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, // #[diesel(embed)] @@ -623,7 +615,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, } = note; @@ -638,7 +630,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, script, @@ -666,7 +658,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { attachment, // metadata ^^^, assets, - inputs, + storage, serial_num, //details ^^^, inclusion_path, @@ -675,7 +667,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { } = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; let metadata = NoteMetadataRawRow { note_type, sender, tag, attachment }; - let details = NoteDetailsRawRow { assets, inputs, serial_num }; + let details = NoteDetailsRawRow { assets, storage, serial_num }; let metadata = metadata.try_into()?; let committed_at = BlockNumber::from_raw_sql(committed_at)?; @@ -684,16 +676,21 @@ impl TryInto for NoteRecordWithScriptRawJoined { let script = script.map(|script| NoteScript::read_from_bytes(&script[..])).transpose()?; let details = if let NoteDetailsRawRow { assets: Some(assets), - inputs: Some(inputs), + storage: Some(storage), serial_num: Some(serial_num), } = details { - let inputs = NoteInputs::read_from_bytes(&inputs[..])?; + let storage = NoteStorage::read_from_bytes(&storage[..])?; let serial_num = Word::read_from_bytes(&serial_num[..])?; - let script = script.ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) - })?; - let recipient = 
NoteRecipient::new(serial_num, script, inputs); + let script = + script.ok_or_else(|| { + miden_node_db::DatabaseError::conversiont_from_sql::< + NoteRecipient, + DatabaseError, + _, + >(None) + })?; + let recipient = NoteRecipient::new(serial_num, script, storage); let assets = NoteAssets::read_from_bytes(&assets[..])?; Some(NoteDetails::new(assets, recipient)) } else { @@ -730,7 +727,7 @@ pub struct NoteRecordRawRow { pub attachment: Vec, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, pub inclusion_path: Vec, @@ -746,16 +743,16 @@ pub struct NoteMetadataRawRow { attachment: Vec, } -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] impl TryInto for NoteMetadataRawRow { type Error = DatabaseError; fn try_into(self) -> Result { let sender = AccountId::read_from_bytes(&self.sender[..])?; let note_type = NoteType::try_from(self.note_type as u32) - .map_err(DatabaseError::conversiont_from_sql::)?; + .map_err(miden_node_db::DatabaseError::conversiont_from_sql::)?; let tag = NoteTag::new(self.tag as u32); let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; - Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) + Ok(NoteMetadata::new(sender, note_type).with_tag(tag).with_attachment(attachment)) } } @@ -767,14 +764,16 @@ pub struct BlockNoteIndexRawRow { pub note_index: i32, // index within batch } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] +#[expect(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for BlockNoteIndexRawRow { type Error = DatabaseError; fn try_into(self) -> Result { let batch_index = self.batch_index as usize; let note_index = self.note_index as usize; let index = BlockNoteIndex::new(batch_index, note_index).ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) + miden_node_db::DatabaseError::conversiont_from_sql::( + None, + ) })?; Ok(index) } @@ -791,7 +790,6 
@@ impl TryInto for BlockNoteIndexRawRow { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -822,7 +820,6 @@ pub(crate) fn insert_notes( /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -868,7 +865,7 @@ pub struct NoteInsertRow { pub consumed_at: Option, pub nullifier: Option>, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub script_root: Option>, pub serial_num: Option>, } @@ -902,7 +899,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { consumed_at: None::, // New notes are always unconsumed. nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), - inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), + storage: note.details.as_ref().map(|d| d.storage().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), serial_num: note.details.as_ref().map(|d| d.serial_num().to_bytes()), } diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 5ab5785374..84e89ebad5 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::query_dsl::methods::SelectDsl; @@ -128,6 +129,7 @@ pub(crate) fn select_nullifiers_by_prefix( /// ORDER BY /// block_num ASC /// ``` +#[cfg(test)] pub(crate) fn select_all_nullifiers( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { @@ -137,6 +139,67 @@ pub(crate) fn select_all_nullifiers( vec_raw_try_into(nullifiers_raw) } +/// Page of nullifiers returned by [`select_nullifiers_paged`]. 
+#[derive(Debug)] +pub struct NullifiersPage { + /// The nullifiers in this page. + pub nullifiers: Vec, + /// If `Some`, there are more results. Use this as the `after_nullifier` for the next page. + pub next_cursor: Option, +} + +/// Selects nullifiers with pagination. +/// +/// Returns up to `page_size` nullifiers, starting after `after_nullifier` if provided. +/// Results are ordered by nullifier bytes for stable pagination. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// nullifier, +/// block_num +/// FROM +/// nullifiers +/// WHERE +/// (nullifier > :after_nullifier OR :after_nullifier IS NULL) +/// ORDER BY +/// nullifier ASC +/// LIMIT :page_size + 1 +/// ``` +pub(crate) fn select_nullifiers_paged( + conn: &mut SqliteConnection, + page_size: NonZeroUsize, + after_nullifier: Option, +) -> Result { + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = + SelectDsl::select(schema::nullifiers::table, NullifierWithoutPrefixRawRow::as_select()) + .order_by(schema::nullifiers::nullifier.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_nullifier { + query = query.filter(schema::nullifiers::nullifier.gt(cursor.to_bytes())); + } + + let nullifiers_raw = query.load::(conn)?; + let mut nullifiers: Vec = vec_raw_try_into(nullifiers_raw)?; + + // If we got more than page_size, there are more results + let next_cursor = if nullifiers.len() > page_size.get() { + nullifiers.pop(); // Remove the extra element + nullifiers.last().map(|info| info.nullifier) + } else { + None + }; + + Ok(NullifiersPage { nullifiers, next_cursor }) +} + /// Insert nullifiers for a block into the database. 
/// /// # Parameters @@ -163,7 +226,6 @@ pub(crate) fn select_all_nullifiers( /// INSERT INTO nullifiers (nullifier, nullifier_prefix, block_num) /// VALUES (?1, ?2, ?3) /// ``` -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index be132e1a56..1095fc1899 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -27,67 +27,7 @@ use super::DatabaseError; use crate::COMPONENT; use crate::db::models::conv::SqlTypeConvert; use crate::db::models::{serialize_vec, vec_raw_try_into}; -use crate::db::{TransactionSummary, schema}; - -/// Select transactions for given accounts in a specified block range -/// -/// # Parameters -/// * `account_ids`: List of account IDs to filter by -/// - Limit: 0 <= size <= 1000 -/// * `block_range`: Range of blocks to include inclusive -/// -/// # Returns -/// -/// A vector of [`TransactionSummary`] types or an error. 
-/// -/// # Raw SQL -/// ```sql -/// SELECT -/// account_id, -/// block_num, -/// transaction_id -/// FROM -/// transactions -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// transaction_id ASC -/// ``` -pub fn select_transactions_by_accounts_and_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw = SelectDsl::select( - schema::transactions::table, - ( - schema::transactions::account_id, - schema::transactions::block_num, - schema::transactions::transaction_id, - ), - ) - .filter(schema::transactions::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::transactions::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::transactions::account_id.eq_any(desired_account_ids)) - .order(schema::transactions::transaction_id.asc()) - .load::(conn) - .map_err(DatabaseError::from)?; - vec_raw_try_into(raw) -} - -#[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] -#[diesel(table_name = schema::transactions)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct TransactionSummaryRaw { - account_id: Vec, - block_num: i64, - transaction_id: Vec, -} +use crate::db::schema; #[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] #[diesel(table_name = schema::transactions)] @@ -103,17 +43,6 @@ pub struct TransactionRecordRaw { size_in_bytes: i64, } -impl TryInto for TransactionSummaryRaw { - type Error = DatabaseError; - fn try_into(self) -> Result { - Ok(crate::db::TransactionSummary { - account_id: AccountId::read_from_bytes(&self.account_id[..])?, - block_num: BlockNumber::from_raw_sql(self.block_num)?, - transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?, - }) - } -} - impl TryInto for TransactionRecordRaw { type Error = 
DatabaseError; fn try_into(self) -> Result { @@ -150,7 +79,6 @@ impl TryInto for TransactionRecordRaw { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -161,10 +89,9 @@ pub(crate) fn insert_transactions( block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[allow(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() - .into_iter() + .iter() .map(|tx| TransactionSummaryRowInsert::new(tx, block_num)) .collect(); @@ -187,7 +114,7 @@ pub struct TransactionSummaryRowInsert { } impl TransactionSummaryRowInsert { - #[allow( + #[expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -197,11 +124,25 @@ impl TransactionSummaryRowInsert { ) -> Self { const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments - // Serialize input notes using binary format (store nullifiers) - let nullifiers_binary = transaction_header.input_notes().to_bytes(); - - // Serialize output notes using binary format (store note IDs) - let output_notes_binary = transaction_header.output_notes().to_bytes(); + // Extract nullifiers from input notes and serialize them. + // We only store the nullifiers (not the full `InputNoteCommitment`) since + // that's all that's needed when reading back `TransactionRecords`. + let nullifiers: Vec = transaction_header + .input_notes() + .iter() + .map(miden_protocol::transaction::InputNoteCommitment::nullifier) + .collect(); + let nullifiers_binary = nullifiers.to_bytes(); + + // Extract note IDs from output note headers and serialize them. + // We only store the `NoteId`s (not the full `NoteHeader` with metadata) since + // that's all that's needed when reading back `TransactionRecords`. 
+ let output_note_ids: Vec = transaction_header + .output_notes() + .iter() + .map(miden_protocol::note::NoteHeader::id) + .collect(); + let output_notes_binary = output_note_ids.to_bytes(); // Manually calculate the estimated size of the transaction header to avoid // the cost of serialization. The size estimation includes: @@ -341,12 +282,13 @@ pub fn select_transactions_records( // Add transactions from this chunk one by one until we hit the limit let mut added_from_chunk = 0; - let mut last_added_tx: Option = None; for tx in chunk { if total_size + tx.size_in_bytes <= max_payload_bytes { total_size += tx.size_in_bytes; - last_added_tx = Some(tx); + last_block_num = Some(tx.block_num); + last_transaction_id = Some(tx.transaction_id.clone()); + all_transactions.push(tx); added_from_chunk += 1; } else { // Can't fit this transaction, stop here @@ -354,13 +296,6 @@ pub fn select_transactions_records( } } - // Update cursor position only for the last transaction that was actually added - if let Some(tx) = last_added_tx { - last_block_num = Some(tx.block_num); - last_transaction_id = Some(tx.transaction_id.clone()); - all_transactions.push(tx); - } - // Break if chunk incomplete (size limit hit or data exhausted) if added_from_chunk < NUM_TXS_PER_CHUNK { break; diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index c472940e45..ef74e86fac 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -1,6 +1,6 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_protocol::note::Nullifier; -use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::utils::Serializable; use crate::errors::DatabaseError; @@ -14,16 +14,6 @@ pub(crate) fn vec_raw_try_into>( ) } -#[allow(dead_code)] -/// Deserialize an iterable container full of byte blobs `B` to types `T` -pub(crate) fn deserialize_raw_vec, T: Deserializable>( - raw: impl IntoIterator, -) -> 
Result, DeserializationError> { - Result::, DeserializationError>::from_iter( - raw.into_iter().map(|raw| T::read_from_bytes(raw.as_ref())), - ) -} - /// Utility to convert an iterable container to a vector of byte blobs pub(crate) fn serialize_vec<'a, D: Serializable + 'a>( raw: impl IntoIterator, @@ -38,7 +28,6 @@ pub fn get_nullifier_prefix(nullifier: &Nullifier) -> u16 { /// Converts a slice of length `N` to an array, returns `None` if invariant /// isn'crates/store/src/db/mod.rs upheld. -#[allow(dead_code)] pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { if bytes.len() != N { return None; @@ -48,7 +37,7 @@ pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { Some(arr) } -#[allow(dead_code)] +#[expect(dead_code)] #[inline] pub fn from_be_to_u32(bytes: &[u8]) -> Option { slice_to_array::<4>(bytes).map(u32::from_be_bytes) @@ -62,8 +51,8 @@ pub struct PragmaSchemaVersion { } /// Returns the schema version of the database. -#[allow(dead_code)] -#[allow( +#[expect(dead_code)] +#[expect( clippy::cast_sign_loss, reason = "schema version is always positive and we will never reach 0xEFFF_..._FFFF" )] diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 0ae4b8e1e1..f93afc16e8 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -47,6 +47,8 @@ diesel::table! { block_headers (block_num) { block_num -> BigInt, block_header -> Binary, + signature -> Binary, + commitment -> Binary, } } @@ -74,7 +76,7 @@ diesel::table! 
{ consumed_at -> Nullable, nullifier -> Nullable, assets -> Nullable, - inputs -> Nullable, + storage -> Nullable, script_root -> Nullable, serial_num -> Nullable, } diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs index 28e480fc0c..9a5ad1328a 100644 --- a/crates/store/src/db/schema_hash.rs +++ b/crates/store/src/db/schema_hash.rs @@ -11,11 +11,11 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; use tracing::instrument; use crate::COMPONENT; use crate::db::migrations::MIGRATIONS; -use crate::errors::SchemaVerificationError; /// Represents a schema object for comparison. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] @@ -107,10 +107,20 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati // Log specific differences at debug level for obj in &missing { - tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Missing or modified" + ); } for obj in &extra { - tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Extra or modified" + ); } return Err(SchemaVerificationError::Mismatch { @@ -129,7 +139,6 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati mod tests { use super::*; use crate::db::migrations::apply_migrations; - use crate::errors::DatabaseError; #[test] fn verify_schema_passes_for_correct_schema() { @@ -181,6 +190,9 @@ mod tests { .execute(&mut conn) .unwrap(); - assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + assert!(matches!( + apply_migrations(&mut conn), + Err(miden_node_db::DatabaseError::SchemaVerification(_)) + )); } } diff --git a/crates/store/src/db/tests.rs 
b/crates/store/src/db/tests.rs index 44b11c9b43..77fb083af4 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1,13 +1,11 @@ -#![allow(clippy::similar_names, reason = "naming dummy test values is hard")] -#![allow(clippy::too_many_lines, reason = "test code can be long")] - use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; use diesel::{Connection, SqliteConnection}; use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; -use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; +use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -21,6 +19,7 @@ use miden_protocol::account::{ AccountStorageMode, AccountType, AccountVaultDelta, + StorageMapKey, StorageSlot, StorageSlotContent, StorageSlotDelta, @@ -41,7 +40,6 @@ use miden_protocol::note::{ Note, NoteAttachment, NoteDetails, - NoteExecutionHint, NoteHeader, NoteId, NoteMetadata, @@ -52,11 +50,10 @@ use miden_protocol::note::{ use miden_protocol::testing::account_id::{ ACCOUNT_ID_PRIVATE_SENDER, ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; -use miden_protocol::testing::random_signer::RandomBlockSigner; +use miden_protocol::testing::random_secret_key::random_secret_key; use miden_protocol::transaction::{ InputNoteCommitment, InputNotes, @@ -66,16 +63,19 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; -use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; -use 
miden_standards::note::{NetworkAccountTarget, create_p2id_note}; +use miden_standards::note::{NetworkAccountTarget, NoteExecutionHint, P2idNote}; use pretty_assertions::assert_eq; use rand::Rng; use super::{AccountInfo, NoteRecord, NullifierInfo}; -use crate::db::TransactionSummary; use crate::db::migrations::apply_migrations; -use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; +use crate::db::models::queries::{ + HISTORICAL_BLOCK_RETENTION, + StorageMapValue, + insert_account_storage_map_value, +}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; @@ -101,7 +101,8 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { 11_u8.into(), ); - conn.transaction(|conn| queries::insert_block_header(conn, &block_header)) + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + conn.transaction(|conn| queries::insert_block_header(conn, &block_header, &dummy_signature)) .unwrap(); } @@ -162,33 +163,6 @@ fn sql_insert_transactions() { assert_eq!(count, 2, "Two elements must have been inserted"); } -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_select_transactions() { - fn query_transactions(conn: &mut SqliteConnection) -> Vec { - queries::select_transactions_by_accounts_and_block_range( - conn, - &[AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap()], - BlockNumber::from(0)..=BlockNumber::from(2), - ) - .unwrap() - } - - let mut conn = create_db(); - let conn = &mut conn; - let transactions = query_transactions(conn); - - assert!(transactions.is_empty(), "No elements must be initially in the DB"); - - let count = insert_transactions(conn); - - assert_eq!(count, 2, "Two elements must have been inserted"); - - let transactions = query_transactions(conn); - - assert_eq!(transactions.len(), 2, "Two elements must be in the DB"); -} - #[test] #[miden_node_test_macro::enable_logging] fn sql_select_nullifiers() { @@ -219,7 +193,7 @@ pub fn create_note(account_id: 
AccountId) -> Note { let coin_seed: [u64; 4] = rand::rng().random(); let rng = Arc::new(Mutex::new(RpoRandomCoin::new(coin_seed.map(Felt::new).into()))); let mut rng = rng.lock().unwrap(); - create_p2id_note( + P2idNote::create( account_id, account_id, vec![Asset::Fungible( @@ -339,7 +313,7 @@ fn make_account_and_note( conn, &[BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), )], block_num, @@ -377,12 +351,8 @@ fn sql_unconsumed_network_notes() { note_index: BlockNoteIndex::new(0, i as usize).unwrap(), note_id: num_to_word(i.into()), note_commitment: num_to_word(i.into()), - metadata: NoteMetadata::new( - account_note.0, - NoteType::Public, - NoteTag::with_account_target(account_note.0), - ) - .with_attachment(attachment.clone()), + metadata: NoteMetadata::new(account_note.0, NoteType::Public) + .with_attachment(attachment.clone()), details: None, inclusion_path: SparseMerklePath::default(), }; @@ -768,7 +738,8 @@ fn db_block_header() { ); // test insertion - queries::insert_block_header(conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(conn, &block_header, &dummy_signature).unwrap(); // test fetch unknown block header let block_number = 1; @@ -799,7 +770,8 @@ fn db_block_header() { 21_u8.into(), ); - queries::insert_block_header(conn, &block_header2).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header2.commitment()); + queries::insert_block_header(conn, &block_header2, &dummy_signature).unwrap(); let res = queries::select_block_header_by_block_num(conn, None).unwrap(); assert_eq!(res.unwrap(), block_header2); @@ -808,80 +780,6 @@ fn db_block_header() { assert_eq!(res, [block_header, block_header2]); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_account() { - let mut conn = create_db(); - let conn = &mut conn; - let block_num: BlockNumber = 
1.into(); - create_block(conn, block_num); - - // test empty table - let account_ids: Vec = - [ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, 1, 2, 3, 4, 5] - .iter() - .map(|acc_id| (*acc_id).try_into().unwrap()) - .collect(); - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test insertion - let account_id = ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE; - let account_commitment = num_to_word(0); - - let row_count = queries::upsert_accounts( - conn, - &[BlockAccountUpdate::new( - account_id.try_into().unwrap(), - account_commitment, - AccountUpdateDetails::Private, - )], - block_num, - ) - .unwrap(); - - assert_eq!(row_count, 1); - - // test successful query - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), - ) - .unwrap(); - assert_eq!( - res, - vec![AccountSummary { - account_id: account_id.try_into().unwrap(), - account_commitment, - block_num, - }] - ); - - // test query for update outside the block range - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - (block_num.as_u32() + 1).into()..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test query with unknown accounts - let res = queries::select_accounts_by_block_range( - conn, - &[6.try_into().unwrap(), 7.try_into().unwrap(), 8.try_into().unwrap()], - (block_num + 1)..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); -} - #[test] #[miden_node_test_macro::enable_logging] fn notes() { @@ -890,7 +788,7 @@ fn notes() { let block_num_1 = 1.into(); create_block(conn, block_num_1); - let block_range = BlockNumber::from(0)..=BlockNumber::from(1); + let block_range = BlockNumber::GENESIS..=BlockNumber::from(1); // test empty table let (res, last_included_block) = @@ -919,7 +817,7 @@ fn notes() { let new_note = create_note(sender); let note_index = 
BlockNoteIndex::new(0, 2).unwrap(); let tag = 5u32; - let note_metadata = NoteMetadata::new(sender, NoteType::Public, tag.into()); + let note_metadata = NoteMetadata::new(sender, NoteType::Public).with_tag(tag.into()); let values = [(note_index, new_note.id(), ¬e_metadata)]; let notes_db = BlockNoteTree::with_entries(values).unwrap(); @@ -930,7 +828,7 @@ fn notes() { note_index, note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: NoteMetadata::new(sender, NoteType::Public, tag.into()), + metadata: NoteMetadata::new(sender, NoteType::Public).with_tag(tag.into()), details: Some(NoteDetails::from(&new_note)), inclusion_path: inclusion_path.clone(), }; @@ -1053,8 +951,8 @@ fn sql_account_storage_map_values_insertion() { queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block2).unwrap(); let slot_name = StorageSlotName::mock(3); - let key1 = Word::from([1u32, 2, 3, 4]); - let key2 = Word::from([5u32, 6, 7, 8]); + let key1 = StorageMapKey::new(Word::from([1u32, 2, 3, 4])); + let key2 = StorageMapKey::new(Word::from([5u32, 6, 7, 8])); let value1 = Word::from([10u32, 11, 12, 13]); let value2 = Word::from([20u32, 21, 22, 23]); let value3 = Word::from([30u32, 31, 32, 33]); @@ -1112,9 +1010,9 @@ fn select_storage_map_sync_values() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); let slot_name = StorageSlotName::mock(5); - let key1 = num_to_word(1); - let key2 = num_to_word(2); - let key3 = num_to_word(3); + let key1 = StorageMapKey::from_index(1u32); + let key2 = StorageMapKey::from_index(2u32); + let key3 = StorageMapKey::from_index(3u32); let value1 = num_to_word(10); let value2 = num_to_word(20); let value3 = num_to_word(30); @@ -1214,42 +1112,6 @@ fn select_storage_map_sync_values() { assert_eq!(page.values, expected, "should return latest values ordered by key"); } -#[test] -fn select_storage_map_sync_values_for_network_account() { - let mut conn = create_db(); - 
let block_num = BlockNumber::from(1); - create_block(&mut conn, block_num); - - let (account_id, _) = - make_account_and_note(&mut conn, block_num, [42u8; 32], AccountStorageMode::Network); - let slot_name = StorageSlotName::mock(7); - let key = num_to_word(1); - let value = num_to_word(10); - - queries::insert_account_storage_map_value( - &mut conn, - account_id, - block_num, - slot_name.clone(), - key, - value, - ) - .unwrap(); - - let page = queries::select_account_storage_map_values( - &mut conn, - account_id, - BlockNumber::GENESIS..=block_num, - ) - .unwrap(); - - assert_eq!( - page.values, - vec![StorageMapValue { block_num, slot_name, key, value }], - "network accounts with public state should be accepted", - ); -} - // UTILITIES // ------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { @@ -1275,15 +1137,22 @@ fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { .compile_component_code("test::interface", code_str) .unwrap(); - let component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supported_type(AccountType::RegularAccountUpdatableCode); + let component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test") + .with_supported_type(AccountType::RegularAccountUpdatableCode), + ) + .unwrap(); AccountBuilder::new(seed) .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap() } @@ -1302,7 +1171,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader Word::try_from([num, num, 0, 0]).unwrap(), Word::try_from([0, 0, num, num]).unwrap(), ), - 
NoteMetadata::new(account_id, NoteType::Public, NoteTag::new(num as u32)), + NoteMetadata::new(account_id, NoteType::Public).with_tag(NoteTag::new(num as u32)), )]; TransactionHeader::new_unchecked( @@ -1369,16 +1238,22 @@ fn mock_account_code_and_storage( let account_component_code = CodeBuilder::default() .compile_component_code("counter_contract::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("counter_contract").with_supports_all_types(), + ) + .unwrap(); AccountBuilder::new(init_seed.unwrap_or([0; 32])) .account_type(account_type) .storage_mode(storage_mode) .with_assets(assets) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap() } @@ -1412,7 +1287,7 @@ fn test_select_account_code_by_commitment() { &mut conn, &[BlockAccountUpdate::new( account.id(), - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), )], block_num_1, @@ -1460,7 +1335,7 @@ fn test_select_account_code_by_commitment_multiple_codes() { &mut conn, &[BlockAccountUpdate::new( account_v1.id(), - account_v1.commitment(), + account_v1.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), )], block_num_1, @@ -1493,7 +1368,7 @@ fn test_select_account_code_by_commitment_multiple_codes() { &mut conn, &[BlockAccountUpdate::new( account_v2.id(), - account_v2.commitment(), + account_v2.to_commitment(), AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), )], block_num_2, @@ -1518,18 +1393,21 @@ fn 
test_select_account_code_by_commitment_multiple_codes() { // ================================================================================================ /// Verifies genesis block with account containing vault assets can be inserted. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_assets() { +async fn genesis_with_account_assets() { use crate::genesis::GenesisState; let component_code = "pub proc foo push.1 end"; let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); @@ -1539,32 +1417,35 @@ fn genesis_with_account_assets() { .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with account containing storage maps can be inserted. 
-#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_storage_map() { +async fn genesis_with_account_storage_map() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; let storage_map = StorageMap::with_entries(vec![ ( - Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(1u32), Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), ), ( - Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(2u32), Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), ), ]) @@ -1580,29 +1461,35 @@ fn genesis_with_account_storage_map() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with account containing both vault assets and 
storage maps. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_account_assets_and_storage() { +async fn genesis_with_account_assets_and_storage() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; @@ -1611,7 +1498,7 @@ fn genesis_with_account_assets_and_storage() { let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); let storage_map = StorageMap::with_entries(vec![( - Word::from([Felt::new(100), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(100u32), Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]), )]) .unwrap(); @@ -1626,31 +1513,37 @@ fn genesis_with_account_assets_and_storage() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); - let genesis_block = genesis_state.into_block().unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } /// Verifies genesis block with 
multiple accounts of different types. /// Tests realistic genesis scenario with basic accounts, assets, and storage. -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_with_multiple_accounts() { +async fn genesis_with_multiple_accounts() { use miden_protocol::account::StorageMap; use crate::genesis::GenesisState; @@ -1658,15 +1551,21 @@ fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("foo::interface", "pub proc foo push.1 end") .unwrap(); - let account_component1 = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - .with_supports_all_types(); + let account_component1 = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("foo").with_supports_all_types(), + ) + .unwrap(); let account1 = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component1) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1676,21 +1575,27 @@ fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("bar::interface", "pub proc bar push.2 end") .unwrap(); - let account_component2 = AccountComponent::new(account_component_code, Vec::new()) - .unwrap() - .with_supports_all_types(); + let account_component2 = AccountComponent::new( + account_component_code, + Vec::new(), + AccountComponentMetadata::new("bar").with_supports_all_types(), + ) + .unwrap(); let account2 = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component2) .with_assets([fungible_asset.into()]) - 
.with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); let storage_map = StorageMap::with_entries(vec![( - Word::from([Felt::new(5), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(5u32), Word::from([Felt::new(15), Felt::new(25), Felt::new(35), Felt::new(45)]), )]) .unwrap(); @@ -1700,15 +1605,21 @@ fn genesis_with_multiple_accounts() { let account_component_code = CodeBuilder::default() .compile_component_code("baz::interface", "pub proc baz push.3 end") .unwrap(); - let account_component3 = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component3 = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("baz").with_supports_all_types(), + ) + .unwrap(); let account3 = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component3) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -1717,9 +1628,9 @@ fn genesis_with_multiple_accounts() { test_fee_params(), 1, 0, - SecretKey::random(), + random_secret_key(), ); - let genesis_block = genesis_state.into_block().unwrap(); + let genesis_block = genesis_state.into_block().await.unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); } @@ -1748,7 +1659,7 @@ fn regression_1461_full_state_delta_inserts_vault_assets() { let block_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(account_delta), ); @@ -1872,7 +1783,7 @@ fn 
serialization_symmetry_note_metadata() { // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type // bits let tag = NoteTag::with_account_target(sender); - let metadata = NoteMetadata::new(sender, NoteType::Public, tag); + let metadata = NoteMetadata::new(sender, NoteType::Public).with_tag(tag); let bytes = metadata.to_bytes(); let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); @@ -1917,7 +1828,8 @@ fn db_roundtrip_block_header() { ); // Insert - queries::insert_block_header(&mut conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(&mut conn, &block_header, &dummy_signature).unwrap(); // Retrieve let retrieved = @@ -1964,7 +1876,7 @@ fn db_roundtrip_account() { Some([99u8; 32]), ); let account_id = account.id(); - let account_commitment = account.commitment(); + let account_commitment = account.to_commitment(); // Insert with full delta (like genesis) let account_delta = AccountDelta::try_from(account.clone()).unwrap(); @@ -2045,47 +1957,6 @@ fn db_roundtrip_notes() { ); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_roundtrip_transactions() { - let mut conn = create_db(); - let block_num = BlockNumber::from(1); - create_block(&mut conn, block_num); - - let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) - .unwrap(); - - let tx = mock_block_transaction(account_id, 1); - let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); - - // Insert - queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); - - // Retrieve - let retrieved = queries::select_transactions_by_accounts_and_block_range( - &mut conn, - &[account_id], - BlockNumber::from(0)..=BlockNumber::from(2), - ) - .unwrap(); - - assert_eq!(retrieved.len(), 1, "Should have one transaction"); - let retrieved_tx = 
&retrieved[0]; - - assert_eq!( - tx.account_id(), - retrieved_tx.account_id, - "AccountId DB roundtrip must be symmetric" - ); - assert_eq!( - tx.id(), - retrieved_tx.transaction_id, - "TransactionId DB roundtrip must be symmetric" - ); - assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); -} - #[test] #[miden_node_test_macro::enable_logging] fn db_roundtrip_vault_assets() { @@ -2135,7 +2006,7 @@ fn db_roundtrip_storage_map_values() { queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) .unwrap(); let slot_name = StorageSlotName::mock(5); - let key = num_to_word(12345); + let key = StorageMapKey::from_index(12345u32); let value = num_to_word(67890); queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) @@ -2181,11 +2052,11 @@ fn db_roundtrip_account_storage_with_maps() { // Create storage with both value slots and map slots let storage_map = StorageMap::with_entries(vec![ ( - Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(1u32), Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), ), ( - Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + StorageMapKey::from_index(2u32), Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), ), ]) @@ -2201,15 +2072,21 @@ fn db_roundtrip_account_storage_with_maps() { let account_component_code = CodeBuilder::default() .compile_component_code("test::interface", component_code) .unwrap(); - let account_component = AccountComponent::new(account_component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + account_component_code, + component_storage, + AccountComponentMetadata::new("test").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([50u8; 32]) .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) 
.with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); @@ -2221,7 +2098,7 @@ fn db_roundtrip_account_storage_with_maps() { let account_delta = AccountDelta::try_from(account.clone()).unwrap(); let block_update = BlockAccountUpdate::new( account_id, - account.commitment(), + account.to_commitment(), AccountUpdateDetails::Delta(account_delta), ); queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); @@ -2272,15 +2149,15 @@ fn db_roundtrip_account_storage_with_maps() { assert!(account_info.details.is_some(), "Public account should have details"); let retrieved_account = account_info.details.unwrap(); assert_eq!( - account.commitment(), - retrieved_account.commitment(), + account.to_commitment(), + retrieved_account.to_commitment(), "Full account commitment must match after DB roundtrip" ); } #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2294,8 +2171,7 @@ fn test_note_metadata_with_attachment_roundtrip() { // Create NoteMetadata with the attachment let metadata = - NoteMetadata::new(account_id, NoteType::Public, NoteTag::with_account_target(account_id)) - .with_attachment(attachment.clone()); + NoteMetadata::new(account_id, NoteType::Public).with_attachment(attachment.clone()); let note = NoteRecord { block_num, @@ -2331,3 +2207,298 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn test_prune_history() { + let mut conn = create_db(); + let conn = &mut conn; + + let public_account_id = 
AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Create blocks around the retention window. + const GENESIS_BLOCK_NUM: u32 = 0; + const OLD_BLOCK_OFFSET: u32 = 1; + const CUTOFF_BLOCK_OFFSET: u32 = 2; + const UPDATE_BLOCK_OFFSET: u32 = 3; + + let block_0: BlockNumber = GENESIS_BLOCK_NUM.into(); + let block_old: BlockNumber = OLD_BLOCK_OFFSET.into(); + let block_cutoff: BlockNumber = CUTOFF_BLOCK_OFFSET.into(); + let block_update: BlockNumber = UPDATE_BLOCK_OFFSET.into(); + let block_tip: BlockNumber = (HISTORICAL_BLOCK_RETENTION + CUTOFF_BLOCK_OFFSET).into(); + + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + create_block(conn, block); + } + + // Create account + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block) + .unwrap(); + } + + // Insert vault assets at different blocks + let vault_key_old = AssetVaultKey::new_unchecked(num_to_word(100)); + let vault_key_cutoff = AssetVaultKey::new_unchecked(num_to_word(200)); + let vault_key_recent = AssetVaultKey::new_unchecked(num_to_word(300)); + let asset_1 = Asset::Fungible(FungibleAsset::new(public_account_id, 1000).unwrap()); + let asset_2 = Asset::Fungible(FungibleAsset::new(public_account_id, 2000).unwrap()); + let asset_3 = Asset::Fungible(FungibleAsset::new(public_account_id, 3000).unwrap()); + + // Old entry at block_old (should be deleted when cutoff is at block_cutoff for + // chain_tip=block_tip) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_old, + vault_key_old, + Some(asset_1), + ) + .unwrap(); + + // Entry exactly at cutoff (block_cutoff, should be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_cutoff, + vault_key_cutoff, + Some(asset_2), + ) + .unwrap(); + + // Recent entry (should always be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + 
block_tip, + vault_key_recent, + Some(asset_3), + ) + .unwrap(); + + // Update an entry to create a non-latest version + let updated_asset = Asset::Fungible(FungibleAsset::new(public_account_id, 1500).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_update, + vault_key_old, + Some(updated_asset), + ) + .unwrap(); + + // Insert storage map values at different blocks + let slot_name = StorageSlotName::mock(5); + let map_key_old = StorageMapKey::from_index(10u32); + let map_key_cutoff = StorageMapKey::from_index(20u32); + let map_key_recent = StorageMapKey::from_index(30u32); + let value_1 = num_to_word(111); + let value_2 = num_to_word(222); + let value_3 = num_to_word(333); + let value_updated = num_to_word(444); + + // Old storage map entry at block_old + insert_account_storage_map_value( + conn, + public_account_id, + block_old, + slot_name.clone(), + map_key_old, + value_1, + ) + .unwrap(); + + // Storage map entry at cutoff boundary (block_cutoff) + insert_account_storage_map_value( + conn, + public_account_id, + block_cutoff, + slot_name.clone(), + map_key_cutoff, + value_2, + ) + .unwrap(); + + // Recent storage map entry + insert_account_storage_map_value( + conn, + public_account_id, + block_tip, + slot_name.clone(), + map_key_recent, + value_3, + ) + .unwrap(); + + // Update map_key_old to create a non-latest entry at block_update + insert_account_storage_map_value( + conn, + public_account_id, + block_update, + slot_name.clone(), + map_key_old, + value_updated, + ) + .unwrap(); + + // Verify initial state - should have 4 vault assets and 4 storage map values + let (_, initial_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(initial_vault_assets.len(), 4, "should have 4 vault assets before cleanup"); + + let initial_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + 
assert_eq!( + initial_storage_values.values.len(), + 4, + "should have 4 storage map values before cleanup" + ); + + // Run cleanup with chain_tip = block_tip, cutoff will be block_tip - HISTORICAL_BLOCK_RETENTION + // = block_cutoff + let (vault_deleted, storage_deleted) = queries::prune_history(conn, block_tip).unwrap(); + + // Verify deletions occurred + assert_eq!(vault_deleted, 1, "should delete 1 old vault asset"); + assert_eq!(storage_deleted, 1, "should delete 1 old storage map value"); + + // Verify remaining vault assets - should have 3 (cutoff, update, tip) + let (_, remaining_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(remaining_vault_assets.len(), 3, "should have 3 vault assets after cleanup"); + + // Verify no vault asset at block_old remains + assert!( + !remaining_vault_assets.iter().any(|v| v.block_num == block_old), + "block_old vault asset should be deleted" + ); + + // Verify vault assets at block_cutoff, block_update, block_tip remain + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_cutoff), + "block_cutoff vault asset should be retained (at cutoff)" + ); + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_update), + "block_update vault asset should be retained" + ); + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_tip), + "block_tip vault asset should be retained" + ); + + // Verify remaining storage map values - should have 3 (cutoff, update, tip) + let remaining_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + assert_eq!( + remaining_storage_values.values.len(), + 3, + "should have 3 storage map values after cleanup" + ); + + // Verify no storage map value at block_old remains + assert!( + !remaining_storage_values.values.iter().any(|v| v.block_num == block_old), + "block_old storage map value should be deleted" + ); + + // 
Verify storage map values at block_cutoff, block_update, block_tip remain + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_cutoff), + "block_cutoff storage map value should be retained (at cutoff)" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_update), + "block_update storage map value should be retained" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_tip), + "block_tip storage map value should be retained" + ); + + // Test that is_latest=true entries are never deleted, even if old + // Insert an old entry marked as latest + let vault_key_old_latest = AssetVaultKey::new_unchecked(num_to_word(999)); + let asset_old = Asset::Fungible(FungibleAsset::new(public_account_id, 9999).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_0, + vault_key_old_latest, + Some(asset_old), + ) + .unwrap(); + + // This entry at block 0 is marked as is_latest=true by insert_account_vault_asset + // Run cleanup again + let (vault_deleted_2, _) = queries::prune_history(conn, block_tip).unwrap(); + + // The old latest entry should not be deleted (vault_deleted_2 should be 0) + assert_eq!(vault_deleted_2, 0, "should not delete any is_latest=true entries"); + + // Verify the old latest entry still exists + let (_, vault_assets_with_latest) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert!( + vault_assets_with_latest + .iter() + .any(|v| v.block_num == block_0 && v.vault_key == vault_key_old_latest), + "is_latest=true entry should be retained even if old" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let bob = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, 
&[mock_block_account_update(bob, 0)], block_num).unwrap(); + + // Build two transaction headers with distinct data + let tx1 = mock_block_transaction(bob, 1); + let tx2 = mock_block_transaction(bob, 2); + let ordered = OrderedTransactionHeaders::new_unchecked(vec![tx1.clone(), tx2.clone()]); + + // Insert + let count = queries::insert_transactions(&mut conn, block_num, &ordered).unwrap(); + assert_eq!(count, 2, "Should insert 2 transactions"); + + // Retrieve + let (last_block, records) = + queries::select_transactions_records(&mut conn, &[bob], BlockNumber::GENESIS..=block_num) + .unwrap(); + assert_eq!(last_block, block_num, "Last block should match"); + assert_eq!(records.len(), 2, "Should retrieve 2 transactions"); + + // Verify each transaction roundtrips correctly. + // Records are ordered by (block_num, transaction_id), so match by ID. + let originals = [&tx1, &tx2]; + for record in &records { + let original = originals + .iter() + .find(|tx| tx.id() == record.transaction_id) + .expect("Retrieved transaction should match one of the originals"); + // Asset symmetry + assert_eq!(record.transaction_id, original.id(),); + assert_eq!(record.account_id, original.account_id(),); + assert_eq!(record.block_num, block_num); + assert_eq!(record.initial_state_commitment, original.initial_state_commitment(),); + assert_eq!(record.final_state_commitment, original.final_state_commitment(),); + + // Input notes are stored as nullifiers only + let expected_nullifiers: Vec = + original.input_notes().iter().map(InputNoteCommitment::nullifier).collect(); + assert_eq!(record.nullifiers, expected_nullifiers,); + + // Output notes are stored as note IDs only + let expected_note_ids: Vec = + original.output_notes().iter().map(NoteHeader::id).collect(); + assert_eq!(record.output_notes, expected_note_ids,); + } +} diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 6796505808..397c173866 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs 
@@ -1,10 +1,9 @@ -use std::any::type_name; use std::io; -use deadpool_sync::InteractError; use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; +use miden_node_utils::ErrorReport; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -18,7 +17,6 @@ use miden_protocol::errors::{ AccountTreeError, AssetError, AssetVaultError, - FeeError, NoteError, NullifierTreeError, StorageMapError, @@ -29,7 +27,6 @@ use thiserror::Error; use tokio::sync::oneshot::error::RecvError; use tonic::Status; -use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; use crate::inner_forest::{InnerForestError, WitnessError}; @@ -40,60 +37,30 @@ use crate::inner_forest::{InnerForestError, WitnessError}; pub enum DatabaseError { // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES // --------------------------------------------------------------------------------------------- - #[error("account is incomplete")] - AccountIncomplete, #[error("account error")] AccountError(#[from] AccountError), - #[error("account delta error")] - AccountDeltaError(#[from] AccountDeltaError), #[error("asset vault error")] AssetVaultError(#[from] AssetVaultError), #[error("asset error")] AssetError(#[from] AssetError), #[error("closed channel")] ClosedChannel(#[from] RecvError), + #[error("database error")] + DatabaseError(#[from] miden_node_db::DatabaseError), #[error("deserialization failed")] DeserializationError(#[from] DeserializationError), - #[error("hex parsing error")] - FromHexError(#[from] hex::FromHexError), #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] MerkleError(#[from] MerkleError), - #[error("network account error")] - NetworkAccountError(#[from] NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), 
#[error("storage map error")] StorageMapError(#[from] StorageMapError), - #[error("setup deadpool connection pool failed")] - Deadpool(#[from] deadpool::managed::PoolError), - #[error("setup deadpool connection pool failed")] - ConnectionPoolObtainError(#[from] Box), #[error(transparent)] Diesel(#[from] diesel::result::Error), - #[error("sqlite FFI boundary NUL termination error (not much you can do, file an issue)")] - DieselSqliteFfi(#[from] std::ffi::NulError), - #[error(transparent)] - DeadpoolDiesel(#[from] deadpool_diesel::Error), - #[error(transparent)] - PoolRecycle(#[from] deadpool::managed::RecycleError), - #[error("summing over column {column} of table {table} exceeded {limit}")] - ColumnSumExceedsLimit { - table: &'static str, - column: &'static str, - limit: &'static str, - #[source] - source: Box, - }, #[error(transparent)] QueryParamLimit(#[from] QueryLimitError), - #[error("conversion from SQL to rust type {to} failed")] - ConversionSqlToRust { - #[source] - inner: Option>, - to: &'static str, - }, // OTHER ERRORS // --------------------------------------------------------------------------------------------- @@ -101,39 +68,16 @@ pub enum DatabaseError { AccountCommitmentsMismatch { expected: Word, calculated: Word }, #[error("account {0} not found")] AccountNotFoundInDb(AccountId), - #[error("account {0} state at block height {1} not found")] - AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), - #[error("block {0} not found in database")] - BlockNotFound(BlockNumber), - #[error("historical block {block_num} not available: {reason}")] - HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, - #[error("invalid storage slot type: {0}")] - 
InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), - #[error("SQLite pool interaction failed: {0}")] - InteractError(String), - #[error("invalid Felt: {0}")] - InvalidFelt(String), - #[error( - "unsupported database version. There is no migration chain from/to this version. \ - Remove all database files and try again." - )] - UnsupportedDatabaseVersion, - #[error("schema verification failed")] - SchemaVerification(#[from] SchemaVerificationError), - #[error(transparent)] - ConnectionManager(#[from] ConnectionManagerError), #[error(transparent)] SqlValueConversion(#[from] DatabaseTypeConversionError), - #[error("Not implemented: {0}")] - NotImplemented(String), #[error("storage root not found for account {account_id}, slot {slot_name}, block {block_num}")] StorageRootNotFound { account_id: AccountId, @@ -142,35 +86,6 @@ pub enum DatabaseError { }, } -impl DatabaseError { - /// Converts from `InteractError` - /// - /// Note: Required since `InteractError` has at least one enum - /// variant that is _not_ `Send + Sync` and hence prevents the - /// `Sync` auto implementation. - /// This does an internal conversion to string while maintaining - /// convenience. 
- /// - /// Using `MSG` as const so it can be called as - /// `.map_err(DatabaseError::interact::<"Your message">)` - pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { - let msg = msg.to_string(); - Self::InteractError(format!("{msg} failed: {e:?}")) - } - - /// Failed to convert an SQL entry to a rust representation - pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError - where - MaybeE: Into>, - E: std::error::Error + Send + Sync + 'static, - { - DatabaseError::ConversionSqlToRust { - inner: err.into().map(|err| Box::new(err) as Box), - to: type_name::(), - } - } -} - impl From for Status { fn from(err: DatabaseError) -> Self { match err { @@ -203,7 +118,7 @@ pub enum StateInitializationError { #[error("failed to load block store")] BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] - DatabaseLoadError(#[from] DatabaseSetupError), + DatabaseLoadError(#[source] DatabaseError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), #[error( @@ -223,36 +138,6 @@ pub enum StateInitializationError { AccountToDeltaConversionFailed(String), } -#[derive(Debug, Error)] -pub enum DatabaseSetupError { - #[error("I/O error")] - Io(#[from] io::Error), - #[error("database error")] - Database(#[from] DatabaseError), - #[error("genesis block error")] - GenesisBlock(#[from] GenesisError), - #[error("pool build error")] - PoolBuild(#[from] deadpool::managed::BuildError), - #[error("Setup deadpool connection pool failed")] - Pool(#[from] deadpool::managed::PoolError), -} - -#[derive(Debug, Error)] -pub enum GenesisError { - // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES - // --------------------------------------------------------------------------------------------- - #[error("database error")] - Database(#[from] DatabaseError), - #[error("failed to build genesis account tree")] - AccountTree(#[source] AccountTreeError), - #[error("failed to deserialize genesis file")] - 
GenesisFileDeserialization(#[from] DeserializationError), - #[error("fee cannot be created")] - Fee(#[from] FeeError), - #[error("failed to build account delta from account")] - AccountDelta(AccountError), -} - // ENDPOINT ERRORS // ================================================================================================= #[derive(Error, Debug)] @@ -313,6 +198,16 @@ pub enum ApplyBlockError { DbUpdateTaskFailed(String), } +impl From for Status { + fn from(err: ApplyBlockError) -> Self { + match err { + ApplyBlockError::InvalidBlockError(_) => Status::invalid_argument(err.as_report()), + + _ => Status::internal(err.as_report()), + } + } +} + #[derive(Error, Debug, GrpcError)] pub enum GetBlockHeaderError { #[error("database error")] @@ -348,6 +243,19 @@ pub enum StateSyncError { FailedToBuildMmrDelta(#[from] MmrError), } +#[derive(Error, Debug, GrpcError)] +pub enum SyncChainMmrError { + #[error("invalid block range")] + InvalidBlockRange(#[source] InvalidBlockRange), + #[error("start block is not known")] + FutureBlock { + chain_tip: BlockNumber, + block_from: BlockNumber, + }, + #[error("malformed block number")] + DeserializationFailed(#[source] ConversionError), +} + impl From for StateSyncError { fn from(value: diesel::result::Error) -> Self { Self::DatabaseError(DatabaseError::from(value)) @@ -359,6 +267,9 @@ pub enum NoteSyncError { #[error("database error")] #[grpc(internal)] DatabaseError(#[from] DatabaseError), + #[error("database error")] + #[grpc(internal)] + UnderlyingDatabaseError(#[from] miden_node_db::DatabaseError), #[error("block headers table is empty")] #[grpc(internal)] EmptyBlockHeadersTable, @@ -478,6 +389,26 @@ pub enum GetBlockByNumberError { DeserializationFailed(#[from] DeserializationError), } +// GET ACCOUNT ERRORS +// ================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetAccountError { + #[error("database error")] + #[grpc(internal)] + 
DatabaseError(#[from] DatabaseError), + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("account {0} not found at block {1}")] + AccountNotFound(AccountId, BlockNumber), + #[error("account {0} is not public")] + AccountNotPublic(AccountId), + #[error("block {0} is unknown")] + UnknownBlock(BlockNumber), + #[error("block {0} has been pruned")] + BlockPruned(BlockNumber), +} + // GET NOTES BY ID ERRORS // ================================================================================================ @@ -546,28 +477,81 @@ pub enum GetWitnessesError { WitnessError(#[from] WitnessError), } -// SCHEMA VERIFICATION ERRORS -// ================================================================================================= +#[cfg(test)] +mod get_account_error_tests { + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::testing::account_id::AccountIdBuilder; + use tonic::Status; -/// Errors that can occur during schema verification. 
-#[derive(Debug, Error)] -pub enum SchemaVerificationError { - #[error("failed to create in-memory reference database")] - InMemoryDbCreation(#[source] diesel::ConnectionError), - #[error("failed to apply migrations to reference database")] - MigrationApplication(#[source] Box), - #[error("failed to extract schema from database")] - SchemaExtraction(#[source] diesel::result::Error), - #[error( - "schema mismatch: expected {expected_count} objects, found {actual_count} \ - ({missing_count} missing, {extra_count} unexpected)" - )] - Mismatch { - expected_count: usize, - actual_count: usize, - missing_count: usize, - extra_count: usize, - }, + use super::GetAccountError; + + fn test_account_id() -> AccountId { + AccountIdBuilder::new().build_with_seed([1; 32]) + } + + #[test] + fn unknown_block_returns_invalid_argument() { + let block = BlockNumber::from(999); + let err = GetAccountError::UnknownBlock(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + assert!(!status.metadata().is_empty() || !status.details().is_empty()); + } + + #[test] + fn block_pruned_returns_invalid_argument() { + let block = BlockNumber::from(1); + let err = GetAccountError::BlockPruned(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_public_returns_invalid_argument() { + let err = GetAccountError::AccountNotPublic(test_account_id()); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_found_returns_invalid_argument_with_block_context() { + let account_id = test_account_id(); + let block = BlockNumber::from(5); + let err = GetAccountError::AccountNotFound(account_id, block); + let msg = err.to_string(); + assert!(msg.contains("not found"), "error message should mention 'not found'"); + assert!(msg.contains("block"), "error message should include block context"); + + let status: Status 
= err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn each_variant_has_unique_discriminant() { + let account_id = test_account_id(); + let block = BlockNumber::from(1); + + let errors = [ + GetAccountError::AccountNotFound(account_id, block), + GetAccountError::AccountNotPublic(account_id), + GetAccountError::UnknownBlock(block), + GetAccountError::BlockPruned(block), + ]; + + let codes: Vec = errors.iter().map(|e| e.api_error().api_code()).collect(); + + // All non-internal variants should have unique, non-zero discriminants + for &code in &codes { + assert_ne!(code, 0, "non-internal variants should not map to Internal (0)"); + } + + // Check uniqueness + let mut sorted = codes.clone(); + sorted.sort_unstable(); + sorted.dedup(); + assert_eq!(sorted.len(), codes.len(), "all error variants should have unique codes"); + } } // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear @@ -580,9 +564,7 @@ mod compile_tests { AccountDeltaError, AccountError, DatabaseError, - DatabaseSetupError, DeserializationError, - GenesisError, NetworkAccountError, NoteError, RecvError, @@ -591,7 +573,7 @@ mod compile_tests { /// Ensure all enum variants remain compat with the desired /// trait bounds. Otherwise one gets very unwieldy errors. 
- #[allow(dead_code)] + #[expect(dead_code)] fn assumed_trait_bounds_upheld() { fn ensure_is_error(_phony: PhantomData) where @@ -612,9 +594,7 @@ mod compile_tests { ensure_is_error::>(PhantomData); ensure_is_error::(PhantomData); - ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); - ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::>(PhantomData); } diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index b39495c872..3ea497d547 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use miden_protocol::account::AccountId; use miden_protocol::errors::{ AccountDeltaError, @@ -12,18 +14,25 @@ use miden_standards::account::wallets::BasicWalletError; use crate::genesis::config::TokenSymbolStr; -#[allow(missing_docs, reason = "Error variants must be descriptive by themselves")] #[derive(Debug, thiserror::Error)] pub enum GenesisConfigError { #[error(transparent)] Toml(#[from] toml::de::Error), + #[error("failed to read config file at {1}")] + ConfigFileRead(#[source] std::io::Error, PathBuf), + #[error("failed to read account file at {1}")] + AccountFileRead(#[source] std::io::Error, PathBuf), + #[error("native faucet from file {path} is not a fungible faucet")] + NativeFaucetNotFungible { path: PathBuf }, #[error("account translation from config to state failed")] Account(#[from] AccountError), #[error("asset translation from config to state failed")] Asset(#[from] AssetError), #[error("adding assets to account failed")] AccountDelta(#[from] AccountDeltaError), - #[error("the defined asset {symbol:?} has no corresponding faucet")] + #[error( + "the defined asset '{symbol}' has no corresponding faucet, or the faucet was provided as an account file" + )] MissingFaucetDefinition { symbol: TokenSymbolStr }, #[error("account with id {account_id} was referenced but is not part of given genesis 
state")] MissingGenesisAccount { account_id: AccountId }, @@ -41,10 +50,10 @@ pub enum GenesisConfigError { BasicWallet(#[from] BasicWalletError), #[error(r#"incompatible combination of `max_supply` ({max_supply})" and `decimals` ({decimals}) exceeding the allowed value range of an `u64`"#)] OutOfRange { max_supply: u64, decimals: u8 }, - #[error("Found duplicate faucet definition for token symbol {symbol:?}")] + #[error("Found duplicate faucet definition for token symbol '{symbol}'")] DuplicateFaucetDefinition { symbol: TokenSymbolStr }, #[error( - "Total issuance {total_issuance} of {symbol:?} exceeds faucet's maximum issuance of {max_supply}" + "Total issuance {total_issuance} of '{symbol}' exceeds faucet's maximum issuance of {max_supply}" )] MaxIssuanceExceeded { symbol: TokenSymbolStr, diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index e7abe8b58d..f2cfe40b8f 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -1,18 +1,19 @@ //! 
Describe a subset of the genesis manifest in easily human readable format use std::cmp::Ordering; +use std::path::{Path, PathBuf}; use std::str::FromStr; use indexmap::IndexMap; use miden_node_utils::crypto::get_rpo_random_coin; -use miden_protocol::account::auth::AuthSecretKey; +use miden_node_utils::signer::BlockSigner; +use miden_protocol::account::auth::{AuthScheme, AuthSecretKey}; use miden_protocol::account::{ Account, AccountBuilder, AccountDelta, AccountFile, AccountId, - AccountStorage, AccountStorageDelta, AccountStorageMode, AccountType, @@ -24,10 +25,10 @@ use miden_protocol::asset::{FungibleAsset, TokenSymbol}; use miden_protocol::block::FeeParameters; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey; use miden_protocol::errors::TokenSymbolError; -use miden_protocol::{Felt, FieldElement, ONE, ZERO}; -use miden_standards::AuthScheme; -use miden_standards::account::auth::AuthFalcon512Rpo; -use miden_standards::account::faucets::BasicFungibleFaucet; +use miden_protocol::{Felt, FieldElement, ONE}; +use miden_standards::AuthMethod; +use miden_standards::account::auth::AuthSingleSig; +use miden_standards::account::faucets::{BasicFungibleFaucet, TokenMetadata}; use miden_standards::account::wallets::create_basic_wallet; use rand::distr::weighted::Weight; use rand::{Rng, SeedableRng}; @@ -42,27 +43,55 @@ use self::errors::GenesisConfigError; #[cfg(test)] mod tests; +const DEFAULT_NATIVE_FAUCET_SYMBOL: &str = "MIDEN"; +const DEFAULT_NATIVE_FAUCET_DECIMALS: u8 = 6; +const DEFAULT_NATIVE_FAUCET_MAX_SUPPLY: u64 = 100_000_000_000_000_000; + // GENESIS CONFIG // ================================================================================================ +/// An account loaded from a `.mac` file (path relative to genesis config directory). +/// +/// Notice: Generic accounts are not validated (e.g. that their vault assets reference known +/// faucets), leaving the responsibility of ensuring valid genesis state to the operator. 
+#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] +struct GenericAccountConfig { + path: PathBuf, +} + /// Specify a set of faucets and wallets with assets for easier test deployments. /// /// Notice: Any faucet must be declared _before_ it's use in a wallet/regular account. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] pub struct GenesisConfig { version: u32, timestamp: u32, - native_faucet: NativeFaucet, + /// Override the native faucet with a custom faucet account. + /// + /// If unspecified, a default native faucet will be used with: + /// + /// ```toml + /// symbol = "MIDEN" + /// decimals = 6 + /// max_supply = 100_000_000_000_000_000 + /// ``` + #[serde(default)] + native_faucet: Option, fee_parameters: FeeParameterConfig, #[serde(default)] wallet: Vec, #[serde(default)] fungible_faucet: Vec, + #[serde(default)] + account: Vec, + #[serde(skip)] + config_dir: PathBuf, } impl Default for GenesisConfig { fn default() -> Self { - let miden = TokenSymbolStr::from_str("MIDEN").unwrap(); Self { version: 1_u32, timestamp: u32::try_from( @@ -73,30 +102,44 @@ impl Default for GenesisConfig { ) .expect("Timestamp should fit into u32"), wallet: vec![], - native_faucet: NativeFaucet { - max_supply: 100_000_000_000_000_000u64, - decimals: 6u8, - symbol: miden.clone(), - }, + native_faucet: None, fee_parameters: FeeParameterConfig { verification_base_fee: 0 }, fungible_faucet: vec![], + account: vec![], + config_dir: PathBuf::from("."), } } } impl GenesisConfig { - /// Read the genesis accounts from a toml formatted string + /// Read the genesis config from a TOML file. + /// + /// The parent directory of `path` is used to resolve relative paths for account files + /// referenced in the configuration (e.g., `[[account]]` entries with `path` fields). /// /// Notice: It will generate the specified case during [`fn into_state`]. 
- pub fn read_toml(toml_str: &str) -> Result { - let me = toml::from_str::(toml_str)?; - Ok(me) + pub fn read_toml_file(path: &Path) -> Result { + let toml_str = fs_err::read_to_string(path) + .map_err(|e| GenesisConfigError::ConfigFileRead(e, path.to_path_buf()))?; + let config_dir = path.parent().expect("config file path must have a parent directory"); + Self::read_toml(&toml_str, config_dir) + } + + /// Parse a genesis config from a TOML formatted string. + /// + /// The `config_dir` parameter is stored so that relative paths for account files + /// (e.g., `[[account]]` entries with `path` fields, or native faucet file references) + /// can be resolved later during [`Self::into_state`]. + fn read_toml(toml_str: &str, config_dir: &Path) -> Result { + let mut config: Self = toml::from_str(toml_str)?; + config.config_dir = config_dir.to_path_buf(); + Ok(config) } /// Convert the in memory representation into the new genesis state /// /// Also returns the set of secrets for the generated accounts. - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] pub fn into_state( self, signer: S, @@ -108,10 +151,20 @@ impl GenesisConfig { fee_parameters, fungible_faucet: fungible_faucet_configs, wallet: wallet_configs, - .. 
+ account: account_entries, + config_dir, } = self; - let symbol = native_faucet.symbol.clone(); + // Load account files from disk + let file_loaded_accounts = account_entries + .into_iter() + .map(|acc| { + let full_path = config_dir.join(&acc.path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + Ok(account_file.account) + }) + .collect::, GenesisConfigError>>()?; let mut wallet_accounts = Vec::::new(); // Every asset sitting in a wallet, has to reference a faucet for that asset @@ -121,10 +174,21 @@ impl GenesisConfig { // accounts/sign transactions let mut secrets = Vec::new(); - // First setup all the faucets - for fungible_faucet_config in std::iter::once(native_faucet.to_faucet_config()) - .chain(fungible_faucet_configs.into_iter()) - { + // Handle native faucet: build from defaults or load from file + let (native_faucet_account, symbol, native_secret) = + NativeFaucetConfig(native_faucet).build_account(&config_dir)?; + if let Some(secret_key) = native_secret { + secrets.push(( + format!("faucet_{symbol}.mac", symbol = symbol.to_string().to_lowercase()), + native_faucet_account.id(), + secret_key, + )); + } + let native_faucet_account_id = native_faucet_account.id(); + faucet_accounts.insert(symbol.clone(), native_faucet_account); + + // Setup additional fungible faucets from parameters + for fungible_faucet_config in fungible_faucet_configs { let symbol = fungible_faucet_config.symbol.clone(); let (faucet_account, secret_key) = fungible_faucet_config.build_account()?; @@ -141,11 +205,6 @@ impl GenesisConfig { // we know the remaining supply in the faucets. 
} - let native_faucet_account_id = faucet_accounts - .get(&symbol) - .expect("Parsing guarantees the existence of a native faucet.") - .id(); - let fee_parameters = FeeParameters::new(native_faucet_account_id, fee_parameters.verification_base_fee)?; @@ -158,11 +217,13 @@ impl GenesisConfig { for (index, WalletConfig { has_updatable_code, storage_mode, assets }) in wallet_configs.into_iter().enumerate() { - tracing::debug!("Adding wallet account {index} with {assets:?}"); + tracing::debug!(index, assets = ?assets, "Adding wallet account"); let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; let init_seed: [u8; 32] = rng.random(); let account_type = if has_updatable_code { @@ -220,11 +281,11 @@ impl GenesisConfig { let mut storage_delta = AccountStorageDelta::default(); if total_issuance != 0 { - // slot 0 - storage_delta.set_item( - AccountStorage::faucet_sysdata_slot().clone(), - [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(), - )?; + let current_metadata = TokenMetadata::try_from(faucet_account.storage())?; + let updated_metadata = + current_metadata.with_token_supply(Felt::new(total_issuance))?; + storage_delta + .set_item(TokenMetadata::metadata_slot().clone(), updated_metadata.into())?; tracing::debug!( "Reducing faucet account {faucet} for {symbol} by {amount}", faucet = faucet_id.to_hex(), @@ -264,6 +325,9 @@ impl GenesisConfig { // Ensure the faucets always precede the wallets referencing them all_accounts.extend(wallet_accounts); + // Append file-loaded accounts as-is + all_accounts.extend(file_loaded_accounts); + Ok(( GenesisState { fee_parameters, @@ -277,36 +341,6 @@ impl GenesisConfig { } } -// NATIVE FAUCET -// 
================================================================================================ - -/// Declare the native fungible asset -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(deny_unknown_fields)] -pub struct NativeFaucet { - /// Token symbol to use for fees. - symbol: TokenSymbolStr, - - decimals: u8, - /// Max supply in full token units - /// - /// It will be converted internally to the smallest representable unit, - /// using based `10.powi(decimals)` as a multiplier. - max_supply: u64, -} - -impl NativeFaucet { - fn to_faucet_config(&self) -> FungibleFaucetConfig { - let NativeFaucet { symbol, decimals, max_supply, .. } = self; - FungibleFaucetConfig { - symbol: symbol.clone(), - decimals: *decimals, - max_supply: *max_supply, - storage_mode: StorageMode::Public, - } - } -} - // FEE PARAMETER CONFIG // ================================================================================================ @@ -320,6 +354,54 @@ pub struct FeeParameterConfig { verification_base_fee: u32, } +// NATIVE FAUCET CONFIG +// ================================================================================================ + +/// Wraps an optional path to a pre-built faucet account file. +/// +/// When no path is provided, a default native faucet is built using hardcoded MIDEN defaults. +struct NativeFaucetConfig(Option); + +impl NativeFaucetConfig { + /// Build or load the native faucet account. + /// + /// For `None`, builds a new faucet from defaults and returns the generated secret key. + /// For `Some(path)`, loads the account from disk and validates it is a fungible faucet. 
+ fn build_account( + self, + config_dir: &Path, + ) -> Result<(Account, TokenSymbolStr, Option), GenesisConfigError> { + match self.0 { + None => { + let symbol = TokenSymbolStr::from_str(DEFAULT_NATIVE_FAUCET_SYMBOL).unwrap(); + let faucet_config = FungibleFaucetConfig { + symbol: symbol.clone(), + decimals: DEFAULT_NATIVE_FAUCET_DECIMALS, + max_supply: DEFAULT_NATIVE_FAUCET_MAX_SUPPLY, + storage_mode: StorageMode::Public, + }; + let (account, secret_key) = faucet_config.build_account()?; + Ok((account, symbol, Some(secret_key))) + }, + Some(path) => { + let full_path = config_dir.join(&path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + let account = account_file.account; + + if account.id().account_type() != AccountType::FungibleFaucet { + return Err(GenesisConfigError::NativeFaucetNotFungible { path: full_path }); + } + + let faucet = BasicFungibleFaucet::try_from(&account) + .expect("validated as fungible faucet above"); + let symbol = TokenSymbolStr::from(faucet.symbol()); + Ok((account, symbol, None)) + }, + } + } +} + // FUNGIBLE FAUCET CONFIG // ================================================================================================ @@ -349,7 +431,7 @@ impl FungibleFaucetConfig { } = self; let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); + let auth = AuthSingleSig::new(secret_key.public_key().into(), AuthScheme::Falcon512Rpo); let init_seed: [u8; 32] = rng.random(); let max_supply = Felt::try_from(max_supply) @@ -442,10 +524,10 @@ impl AccountSecrets { /// /// If no name is present, a new one is generated based on the current time /// and the index in - pub fn as_account_files( + pub fn as_account_files( &self, - genesis_state: &GenesisState, - ) -> impl Iterator> + use<'_, S> { + genesis_state: &GenesisState, + ) -> 
impl Iterator> + '_ { let account_lut = IndexMap::::from_iter( genesis_state.accounts.iter().map(|account| (account.id(), account.clone())), ); @@ -548,6 +630,14 @@ impl From for TokenSymbol { } } +impl From for TokenSymbolStr { + fn from(symbol: TokenSymbol) -> Self { + // SAFETY: TokenSymbol guarantees valid format, so to_string should not fail + let raw = symbol.to_string().expect("TokenSymbol should always produce valid string"); + Self { raw, encoded: symbol } + } +} + impl Ord for TokenSymbolStr { fn cmp(&self, other: &Self) -> Ordering { self.raw.cmp(&other.raw) diff --git a/crates/store/src/genesis/config/samples/01-simple.toml b/crates/store/src/genesis/config/samples/01-simple.toml index d32403e85c..2d7af48849 100644 --- a/crates/store/src/genesis/config/samples/01-simple.toml +++ b/crates/store/src/genesis/config/samples/01-simple.toml @@ -1,11 +1,6 @@ timestamp = 1717344256 version = 1 -[native_faucet] -decimals = 3 -max_supply = 100_000_000 -symbol = "MIDEN" - [fee_parameters] verification_base_fee = 0 diff --git a/crates/store/src/genesis/config/samples/02-with-account-files.toml b/crates/store/src/genesis/config/samples/02-with-account-files.toml new file mode 100644 index 0000000000..ede3032b64 --- /dev/null +++ b/crates/store/src/genesis/config/samples/02-with-account-files.toml @@ -0,0 +1,30 @@ +# Genesis configuration example with AggLayer account files +# +# This example demonstrates how to include pre-built accounts from .mac files +# in the genesis block. The account files are generated by the build script +# using deterministic seeds for reproducibility. +# +# They demonstrate interdependencies between accounts: +# - bridge.mac: AggLayer bridge account for cross-chain asset transfers +# - agglayer_faucet_eth.mac: AggLayer faucet for wrapped ETH, depends on the bridge account. +# - agglayer_faucet_usdc.mac: AggLayer faucet for wrapped USDC, depends on the bridge account. 
+# +# Paths are relative to the directory containing this configuration file. + +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +# AggLayer bridge account for bridging assets to/from AggLayer +[[account]] +path = "02-with-account-files/bridge.mac" + +# AggLayer ETH faucet for wrapped ETH tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_eth.mac" + +# AggLayer USDC faucet for wrapped USDC tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_usdc.mac" diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac new file mode 100644 index 0000000000..b76116b574 Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac differ diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac new file mode 100644 index 0000000000..8e0c2650be Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac differ diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac new file mode 100644 index 0000000000..9bab86857e Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac differ diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 23e2daa43c..ebcb61e8e8 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,3 +1,6 @@ +use std::io::Write; +use std::path::Path; + use assert_matches::assert_matches; use miden_protocol::ONE; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; @@ -6,11 +9,23 @@ use 
super::*; type TestResult = Result<(), Box>; +/// Helper to write TOML content to a file and return the path +fn write_toml_file(dir: &Path, content: &str) -> std::path::PathBuf { + let path = dir.join("genesis.toml"); + let mut file = std::fs::File::create(&path).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + path +} + #[test] #[miden_node_test_macro::enable_logging] fn parsing_yields_expected_default_values() -> TestResult { - let s = include_str!("./samples/01-simple.toml"); - let gcfg = GenesisConfig::read_toml(s)?; + // Copy sample file to temp dir since read_toml_file needs a real file path + let temp_dir = tempfile::tempdir()?; + let sample_content = include_str!("./samples/01-simple.toml"); + let config_path = write_toml_file(temp_dir.path(), sample_content); + + let gcfg = GenesisConfig::read_toml_file(&config_path)?; let (state, _secrets) = gcfg.into_state(SecretKey::new())?; let _ = state; // faucets always precede wallet accounts @@ -30,8 +45,8 @@ fn parsing_yields_expected_default_values() -> TestResult { { let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); - assert_eq!(faucet.max_supply(), Felt::new(100_000_000)); - assert_eq!(faucet.decimals(), 3); + assert_eq!(faucet.max_supply(), Felt::new(100_000_000_000_000_000)); + assert_eq!(faucet.decimals(), 6); assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); } @@ -44,18 +59,15 @@ fn parsing_yields_expected_default_values() -> TestResult { }); // check total issuance of the faucet - assert_eq!( - native_faucet.storage().get_item(AccountStorage::faucet_sysdata_slot()).unwrap()[3], - Felt::new(999_777), - "Issuance mismatch" - ); + let metadata = TokenMetadata::try_from(native_faucet.storage()).unwrap(); + assert_eq!(metadata.token_supply(), Felt::new(999_777), "Issuance mismatch"); Ok(()) } -#[test] +#[tokio::test] #[miden_node_test_macro::enable_logging] -fn genesis_accounts_have_nonce_one() -> TestResult { +async fn genesis_accounts_have_nonce_one() -> 
TestResult { let gcfg = GenesisConfig::default(); let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap(); let mut iter = secrets.as_account_files(&state); @@ -64,6 +76,285 @@ fn genesis_accounts_have_nonce_one() -> TestResult { assert_eq!(status_quo.account.nonce(), ONE); - let _block = state.into_block()?; + let _block = state.into_block().await?; + Ok(()) +} + +#[test] +fn parsing_account_from_file() -> TestResult { + use miden_protocol::account::auth::AuthScheme; + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthMethod; + use miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a test wallet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; + + let test_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountUpdatableCode, + AccountStorageMode::Public, + )?; + + let account_id = test_account.id(); + + // Save to file + let account_file_path = config_dir.join("test_account.mac"); + let account_file = AccountFile::new(test_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that references the account file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "test_account.mac" +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // 
Convert to state and verify the account is included + let (state, _secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == account_id)); + + Ok(()) +} + +#[test] +fn parsing_native_faucet_from_file() -> TestResult { + use miden_protocol::account::auth::AuthScheme; + use miden_protocol::account::{AccountBuilder, AccountFile, AccountStorageMode, AccountType}; + use miden_standards::account::auth::AuthSingleSig; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a faucet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthSingleSig::new(secret_key.public_key().into(), AuthScheme::Falcon512Rpo); + + let faucet_component = + BasicFungibleFaucet::new(TokenSymbol::new("MIDEN").unwrap(), 6, Felt::new(1_000_000_000))?; + + let faucet_account = AccountBuilder::new(init_seed) + .account_type(AccountType::FungibleFaucet) + .storage_mode(AccountStorageMode::Public) + .with_auth_component(auth) + .with_component(faucet_component) + .build()?; + + let faucet_id = faucet_account.id(); + + // Save to file + let faucet_file_path = config_dir.join("native_faucet.mac"); + let account_file = AccountFile::new(faucet_account, vec![]); + account_file.write(&faucet_file_path)?; + + // Create a genesis config TOML that references the faucet file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "native_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // Convert to state and verify the native 
faucet is included + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == faucet_id)); + + // No secrets should be generated for file-loaded native faucet + assert!(secrets.secrets.is_empty()); + + Ok(()) +} + +#[test] +fn native_faucet_from_file_must_be_faucet_type() -> TestResult { + use miden_protocol::account::auth::AuthScheme; + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthMethod; + use miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a regular wallet account (not a faucet) and try to use it as native faucet + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthMethod::SingleSig { + approver: (secret_key.public_key().into(), AuthScheme::Falcon512Rpo), + }; + + let regular_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + )?; + + // Save to file + let account_file_path = config_dir.join("not_a_faucet.mac"); + let account_file = AccountFile::new(regular_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that tries to use a non-faucet as native faucet + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "not_a_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // into_state should fail with NativeFaucetNotFungible error when loading the file + let 
result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::NativeFaucetNotFungible { .. }), + "Expected NativeFaucetNotFungible error, got: {err:?}" + ); + + Ok(()) +} + +#[test] +fn missing_account_file_returns_error() { + // Create a genesis config TOML that references a non-existent file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "does_not_exist.mac" +"#; + + // Use temp dir as config dir + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = write_toml_file(temp_dir.path(), toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path).unwrap(); + + // into_state should fail with AccountFileRead error when loading the file + let result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::AccountFileRead(..)), + "Expected AccountFileRead error, got: {err:?}" + ); +} + +#[tokio::test] +#[miden_node_test_macro::enable_logging] +async fn parsing_agglayer_sample_with_account_files() -> TestResult { + use miden_protocol::account::AccountType; + + // Use the actual sample file path since it references relative .mac files + let sample_path = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("src/genesis/config/samples/02-with-account-files.toml"); + + let gcfg = GenesisConfig::read_toml_file(&sample_path)?; + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + + // Should have 4 accounts: + // 1. Native faucet (MIDEN) - built from parameters + // 2. Bridge account (bridge.mac) - loaded from file + // 3. ETH faucet (agglayer_faucet_eth.mac) - loaded from file + // 4. 
USDC faucet (agglayer_faucet_usdc.mac) - loaded from file + assert_eq!(state.accounts.len(), 4, "Expected 4 accounts in genesis state"); + + // Verify account types + let native_faucet = &state.accounts[0]; + let bridge_account = &state.accounts[1]; + let eth_faucet = &state.accounts[2]; + let usdc_faucet = &state.accounts[3]; + + // Native faucet should be a fungible faucet (built from parameters) + assert_eq!( + native_faucet.id().account_type(), + AccountType::FungibleFaucet, + "Native faucet should be a FungibleFaucet" + ); + + // Verify native faucet symbol + { + let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); + assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); + } + + // Bridge account is a regular account (not a faucet) + assert!( + bridge_account.is_regular_account(), + "Bridge account should be a regular account" + ); + + // ETH faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + eth_faucet.id().account_type(), + AccountType::FungibleFaucet, + "ETH faucet should be a FungibleFaucet" + ); + + // USDC faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + usdc_faucet.id().account_type(), + AccountType::FungibleFaucet, + "USDC faucet should be a FungibleFaucet" + ); + + // Only the native faucet generates a secret (built from parameters) + assert_eq!(secrets.secrets.len(), 1, "Only native faucet should generate a secret"); + + // Verify the genesis state can be converted to a block + let _block = state.into_block().await?; + Ok(()) } diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index 5df1825d66..b91a31634d 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -1,3 +1,4 @@ +use miden_node_utils::signer::BlockSigner; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{Account, AccountDelta}; @@ -9,17 +10,15 @@ use 
miden_protocol::block::{ BlockNoteTree, BlockNumber, BlockProof, - BlockSigner, FeeParameters, ProvenBlock, }; use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks}; use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage, Smt}; +use miden_protocol::errors::AccountError; use miden_protocol::note::Nullifier; use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionKernel}; -use crate::errors::GenesisError; - pub mod config; // GENESIS STATE @@ -68,28 +67,25 @@ impl GenesisState { } impl GenesisState { - /// Returns the block header and the account SMT - pub fn into_block(self) -> Result { + /// Returns the block header and the account SMT. + pub async fn into_block(self) -> anyhow::Result { let accounts: Vec = self .accounts .iter() .map(|account| { let account_update_details = if account.id().is_public() { - AccountUpdateDetails::Delta( - AccountDelta::try_from(account.clone()) - .map_err(GenesisError::AccountDelta)?, - ) + AccountUpdateDetails::Delta(AccountDelta::try_from(account.clone())?) } else { AccountUpdateDetails::Private }; Ok(BlockAccountUpdate::new( account.id(), - account.commitment(), + account.to_commitment(), account_update_details, )) }) - .collect::, GenesisError>>()?; + .collect::, AccountError>>()?; // Convert account updates to SMT entries using account_id_to_smt_key let smt_entries = accounts.iter().map(|update| { @@ -134,7 +130,10 @@ impl GenesisState { let block_proof = BlockProof::new_dummy(); - let signature = self.block_signer.sign(&header); + // Sign and assert verification for sanity (no mismatch between frontend and backend signing + // impls). + let signature = self.block_signer.sign(&header).await?; + assert!(signature.verify(header.commitment(), &self.block_signer.public_key())); // SAFETY: Header and accounts should be valid by construction. // No notes or nullifiers are created at genesis, which is consistent with the above empty // block note tree root and empty nullifier tree root. 
diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 0429864067..2d0a17242a 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -5,7 +5,7 @@ use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountV use miden_protocol::account::{ AccountId, NonFungibleDeltaAction, - StorageMap, + StorageMapKey, StorageMapWitness, StorageSlotName, }; @@ -13,7 +13,7 @@ use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; -use miden_protocol::errors::{AssetError, StorageMapError}; +use miden_protocol::errors::{AccountError, AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; @@ -25,6 +25,12 @@ mod tests; #[derive(Debug, Error)] pub enum InnerForestError { + #[error(transparent)] + Account(#[from] AccountError), + #[error(transparent)] + Asset(#[from] AssetError), + #[error(transparent)] + Merkle(#[from] MerkleError), #[error( "balance underflow: account {account_id}, faucet {faucet_id}, \ previous balance {prev_balance}, delta {delta}" @@ -64,7 +70,8 @@ pub(crate) struct InnerForest { /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. /// Accumulated from deltas - each block's entries include all entries up to that point. - storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + storage_entries: + BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, /// Maps (`account_id`, `block_num`) to vault SMT root. /// Tracks asset vault versions across all blocks with structural sharing. @@ -89,6 +96,14 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } + /// Retrieves the most recent vault root for an account. 
+ fn get_latest_vault_root(&self, account_id: AccountId) -> Word { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, @@ -128,13 +143,13 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, - raw_key: Word, + raw_key: StorageMapKey, ) -> Result { - let key = StorageMap::hash_key(raw_key); + let key_hash = raw_key.hash(); let root = self .get_storage_map_root(account_id, slot_name, block_num) .ok_or(WitnessError::RootNotFound)?; - let proof = self.forest.open(root, key)?; + let proof = self.forest.open(root, key_hash.into())?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) } @@ -168,14 +183,14 @@ impl InnerForest { account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, - raw_keys: &[Word], + raw_keys: &[StorageMapKey], ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { - let key = StorageMap::hash_key(*raw_key); - self.forest.open(root, key) + let key_hash = raw_key.hash(); + self.forest.open(root, key_hash.into()) })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) @@ -216,7 +231,7 @@ impl InnerForest { // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- - /// Applies account updates from a block to the forest. + /// Updates the forest with account vault and storage changes from a delta. /// /// Iterates through account updates and applies each delta to the forest. /// Private accounts should be filtered out before calling this method. 
@@ -268,76 +283,67 @@ impl InnerForest { let account_id = delta.id(); let is_full_state = delta.is_full_state(); + // Validate full-state invariants in debug builds. + #[cfg(debug_assertions)] + if is_full_state { + let has_vault_root = self.vault_roots.keys().any(|(id, _)| *id == account_id); + let has_storage_root = self.storage_map_roots.keys().any(|(id, ..)| *id == account_id); + let has_storage_entries = self.storage_entries.keys().any(|(id, ..)| *id == account_id); + + assert!( + !has_vault_root && !has_storage_root && !has_storage_entries, + "full-state delta should not be applied to existing account" + ); + } + + // Apply vault changes. if is_full_state { - self.insert_account_vault(block_num, account_id, delta.vault()); + self.insert_account_vault(block_num, account_id, delta.vault())?; } else if !delta.vault().is_empty() { self.update_account_vault(block_num, account_id, delta.vault())?; } + // Apply storage map changes. if is_full_state { - self.insert_account_storage(block_num, account_id, delta.storage()); + self.insert_account_storage(block_num, account_id, delta.storage())?; } else if !delta.storage().is_empty() { - self.update_account_storage(block_num, account_id, delta.storage()); + self.update_account_storage(block_num, account_id, delta.storage())?; } Ok(()) } - // ASSET VAULT DELTA PROCESSING - // -------------------------------------------------------------------------------------------- - - /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the - /// account, returns an empty SMT root. - fn get_latest_vault_root(&self, account_id: AccountId) -> Word { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) - } - - /// Inserts asset vault data into the forest for the specified account. Assumes that asset - /// vault for this account does not yet exist in the forest. 
fn insert_account_vault( &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountVaultDelta, - ) { - // get the current vault root for the account, and make sure it is empty + vault_delta: &AccountVaultDelta, + ) -> Result<(), InnerForestError> { let prev_root = self.get_latest_vault_root(account_id); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - // if there are no assets in the vault, add a root of an empty SMT to the vault roots map - // so that the map has entries for all accounts, and then return (i.e., no need to insert - // anything into the forest) - if delta.is_empty() { + if vault_delta.is_empty() { self.vault_roots.insert((account_id, block_num), prev_root); - return; + return Ok(()); } let mut entries: Vec<(Word, Word)> = Vec::new(); - // process fungible assets - for (faucet_id, amount_delta) in delta.fungible().iter() { + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { let amount = (*amount_delta).try_into().expect("full-state amount should be non-negative"); - let asset = FungibleAsset::new(*faucet_id, amount).expect("valid faucet id"); + let asset = FungibleAsset::new(*faucet_id, amount)?; entries.push((asset.vault_key().into(), asset.into())); } - // process non-fungible assets - for (&asset, _action) in delta.non_fungible().iter() { - // TODO: assert that action is addition + for (&asset, action) in vault_delta.non_fungible().iter() { + debug_assert_eq!(action, &NonFungibleDeltaAction::Add); entries.push((asset.vault_key().into(), asset.into())); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); + let new_root = self.forest.batch_insert(prev_root, entries)?; self.vault_roots.insert((account_id, block_num), new_root); @@ -348,14 +354,81 @@ impl InnerForest { vault_entries = num_entries, "Inserted vault 
into forest" ); + Ok(()) } - /// Updates the forest with vault changes from a delta. The vault delta is assumed to be - /// non-empty. + fn insert_account_storage( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + storage_delta: &AccountStorageDelta, + ) -> Result<(), InnerForestError> { + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + + let raw_map_entries: Vec<(StorageMapKey, Word)> = + Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { + if value == EMPTY_WORD { + None + } else { + Some((key.into_inner(), value)) + } + })); + + if raw_map_entries.is_empty() { + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), prev_root); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); + + continue; + } + + let hashed_entries: Vec<(Word, Word)> = Vec::from_iter( + raw_map_entries.iter().map(|(key, value)| (key.hash().into(), *value)), + ); + + let new_root = self.forest.batch_insert(prev_root, hashed_entries.iter().copied())?; + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + + let num_entries = raw_map_entries.len(); + + let map_entries = BTreeMap::from_iter(raw_map_entries); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), map_entries); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + ?slot_name, + delta_entries = num_entries, + "Inserted storage map into forest" + ); + } + Ok(()) + } + + // ASSET VAULT DELTA PROCESSING + // -------------------------------------------------------------------------------------------- + + /// Updates the forest with vault changes from a delta and returns the new root. 
/// /// Processes both fungible and non-fungible asset changes, building entries for the vault SMT /// and tracking the new root. /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. + /// + /// # Returns + /// + /// The new vault root after applying the delta. + /// /// # Errors /// /// Returns an error if applying a delta results in a negative balance. @@ -363,19 +436,15 @@ impl InnerForest { &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountVaultDelta, - ) -> Result<(), InnerForestError> { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - - // get the previous vault root; the root could be for an empty or non-empty SMT + vault_delta: &AccountVaultDelta, + ) -> Result { let prev_root = self.get_latest_vault_root(account_id); let mut entries: Vec<(Word, Word)> = Vec::new(); // Process fungible assets - for (faucet_id, amount_delta) in delta.fungible().iter() { - let key: Word = - FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { + let key: Word = FungibleAsset::new(*faucet_id, 0)?.vault_key().into(); let new_amount = { // amount delta is a change that must be applied to previous balance. 
@@ -402,13 +471,13 @@ impl InnerForest { let value = if new_amount == 0 { EMPTY_WORD } else { - FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() + FungibleAsset::new(*faucet_id, new_amount)?.into() }; entries.push((key, value)); } // Process non-fungible assets - for (asset, action) in delta.non_fungible().iter() { + for (asset, action) in vault_delta.non_fungible().iter() { let value = match action { NonFungibleDeltaAction::Add => Word::from(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => EMPTY_WORD, @@ -416,13 +485,14 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); + if entries.is_empty() { + self.vault_roots.insert((account_id, block_num), prev_root); + return Ok(prev_root); + } + let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); + let new_root = self.forest.batch_insert(prev_root, entries)?; self.vault_roots.insert((account_id, block_num), new_root); @@ -433,14 +503,13 @@ impl InnerForest { vault_entries = num_entries, "Updated vault in forest" ); - Ok(()) + Ok(new_root) } // STORAGE MAP DELTA PROCESSING // -------------------------------------------------------------------------------------------- - /// Retrieves the most recent storage map SMT root for an account slot. If no storage root is - /// found for the slot, returns an empty SMT root. + /// Retrieves the most recent storage map SMT root for an account slot. 
fn get_latest_storage_map_root( &self, account_id: AccountId, @@ -449,7 +518,7 @@ impl InnerForest { self.storage_map_roots .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..=(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) @@ -461,135 +530,60 @@ impl InnerForest { &self, account_id: AccountId, slot_name: &StorageSlotName, - ) -> BTreeMap { + ) -> BTreeMap { self.storage_entries .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map(|(_, entries)| entries.clone()) .unwrap_or_default() } - /// Inserts all storage maps from the provided storage delta into the forest. + /// Updates the forest with storage map changes from a delta and returns updated roots. /// - /// Assumes that storage maps for the provided account are not in the forest already. - fn insert_account_storage( + /// Processes storage map slot deltas, building SMTs for each modified slot + /// and tracking the new roots and accumulated entries. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. + /// + /// # Returns + /// + /// A map from slot name to the new storage map root for that slot. 
+ fn update_account_storage( &mut self, block_num: BlockNumber, account_id: AccountId, - delta: &AccountStorageDelta, - ) { - for (slot_name, map_delta) in delta.maps() { - // get the latest root for this map, and make sure the root is for an empty tree - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - - // build a vector of raw entries and filter out any empty values; such values - // shouldn't be present in full-state deltas, but it is good to exclude them - // explicitly - let raw_map_entries: Vec<(Word, Word)> = map_delta - .entries() - .iter() - .filter_map(|(&key, &value)| { - if value == EMPTY_WORD { - None - } else { - Some((Word::from(key), value)) - } - }) - .collect(); - - // if the delta is empty, make sure we create an entry in the storage map roots map - // and storage entries map (so storage_map_entries() queries work) - if raw_map_entries.is_empty() { - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); - - continue; - } - - // hash the keys before inserting into the forest, matching how `StorageMap` - // hashes keys before inserting into the SMT. 
- let hashed_entries: Vec<(Word, Word)> = raw_map_entries - .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); - - // insert the updates into the forest and update storage map roots map - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); - - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); + storage_delta: &AccountStorageDelta, + ) -> Result, InnerForestError> { + let mut updated_roots = BTreeMap::new(); - assert!(!raw_map_entries.is_empty(), "a non-empty delta should have entries"); - let num_entries = raw_map_entries.len(); - - // keep track of the state of storage map entries (using raw keys for delta merging) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let map_entries = BTreeMap::from_iter(raw_map_entries); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), map_entries); + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - tracing::debug!( - target: crate::COMPONENT, - %account_id, - %block_num, - ?slot_name, - delta_entries = num_entries, - "Inserted storage map into forest" + let delta_entries = Vec::from_iter( + map_delta.entries().iter().map(|(key, value)| ((*key).into_inner(), *value)), ); - } - } - /// Updates the forest with storage map changes from a delta. - /// - /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the - /// new roots and accumulated entries. 
- fn update_account_storage( - &mut self, - block_num: BlockNumber, - account_id: AccountId, - delta: &AccountStorageDelta, - ) { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - - for (slot_name, map_delta) in delta.maps() { - // map delta shouldn't be empty, but if it is for some reason, there is nothing to do - if map_delta.is_empty() { + if delta_entries.is_empty() { continue; } - // update the storage map tree in the forest and add an entry to the storage map roots - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - let delta_entries: Vec<(Word, Word)> = - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); - - // Hash the keys before inserting into the forest, matching how StorageMap - // hashes keys before inserting into the SMT. let hashed_entries: Vec<(Word, Word)> = delta_entries .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) + .map(|(key, value): &(StorageMapKey, Word)| (key.hash().into(), *value)) .collect(); - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); + let updated_root = self.forest.batch_insert(prev_root, hashed_entries)?; self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); + .insert((account_id, slot_name.clone(), block_num), updated_root); + updated_roots.insert(slot_name.clone(), updated_root); - // merge the delta with the latest entries in the map (using raw keys) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); for (key, value) in &delta_entries { if *value == EMPTY_WORD { @@ -611,5 +605,9 @@ impl InnerForest { "Updated storage map in forest" ); } + + Ok(updated_roots) } + + // TODO: tie in-memory forest retention to DB 
pruning policy once forest queries rely on it. } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 5fc0cc6c0c..3045822c7b 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,4 +1,4 @@ -use miden_protocol::account::AccountCode; +use miden_protocol::account::{AccountCode, StorageMapKey}; use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, @@ -323,7 +323,7 @@ fn test_update_storage_map() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(3); - let key = Word::from([1u32, 2, 3, 4]); + let key = StorageMapKey::new(Word::from([1u32, 2, 3, 4])); let value = Word::from([5u32, 6, 7, 8]); let mut map_delta = StorageMapDelta::default(); @@ -403,8 +403,8 @@ fn test_storage_map_incremental_updates() { let account_id = dummy_account(); let slot_name = StorageSlotName::mock(3); - let key1 = Word::from([1u32, 0, 0, 0]); - let key2 = Word::from([2u32, 0, 0, 0]); + let key1 = StorageMapKey::from_index(1u32); + let key2 = StorageMapKey::from_index(2u32); let value1 = Word::from([10u32, 0, 0, 0]); let value2 = Word::from([20u32, 0, 0, 0]); let value3 = Word::from([30u32, 0, 0, 0]); @@ -445,9 +445,58 @@ fn test_storage_map_incremental_updates() { assert_ne!(root_1, root_3); } +#[test] +fn test_storage_map_removals() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + const SLOT_INDEX: usize = 3; + const VALUE_1: [u32; 4] = [10, 0, 0, 0]; + const VALUE_2: [u32; 4] = [20, 0, 0, 0]; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(SLOT_INDEX); + let key_1 = StorageMapKey::from_index(1); + let key_2 = StorageMapKey::from_index(2); + let value_1 = Word::from(VALUE_1); + let value_2 = Word::from(VALUE_2); + + let block_1 = 
BlockNumber::GENESIS.child(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key_1, value_1); + map_delta_1.insert(key_2, value_2); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + let block_2 = block_1.child(); + let map_delta_2 = StorageMapDelta::from_iters([key_1], []); + let raw_2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_2))]); + let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); + forest.update_account(block_2, &delta_2).unwrap(); + + let entries = forest + .storage_map_entries(account_id, slot_name, block_2) + .expect("storage entries should be available"); + + let StorageMapEntries::AllEntries(entries) = entries.entries else { + panic!("expected entries without proofs"); + }; + + let entries_by_key = BTreeMap::from_iter(entries); + assert_eq!(entries_by_key.len(), 1); + assert_eq!(entries_by_key.get(&key_2), Some(&value_2)); + assert!(!entries_by_key.contains_key(&key_1)); +} + #[test] fn test_empty_storage_map_entries_query() { - use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::auth::{AuthScheme, PublicKeyCommitment}; + use miden_protocol::account::component::AccountComponentMetadata; use miden_protocol::account::{ AccountBuilder, AccountComponent, @@ -456,7 +505,7 @@ fn test_empty_storage_map_entries_query() { StorageMap, StorageSlot, }; - use miden_standards::account::auth::AuthFalcon512Rpo; + use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; let mut forest = InnerForest::new(); @@ -470,15 +519,21 @@ fn test_empty_storage_map_entries_query() { let 
component_code = CodeBuilder::default() .compile_component_code("test::interface", "pub proc test push.1 end") .unwrap(); - let account_component = AccountComponent::new(component_code, component_storage) - .unwrap() - .with_supports_all_types(); + let account_component = AccountComponent::new( + component_code, + component_storage, + AccountComponentMetadata::new("test").with_supports_all_types(), + ) + .unwrap(); let account = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthSingleSig::new( + PublicKeyCommitment::from(EMPTY_WORD), + AuthScheme::Falcon512Rpo, + )) .build_existing() .unwrap(); diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 1d345dcf01..519f8504b9 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -10,7 +10,11 @@ pub mod state; #[cfg(feature = "rocksdb")] pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; +pub use db::Db; +pub use db::models::conv::SqlTypeConvert; +pub use errors::DatabaseError; pub use genesis::GenesisState; +pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; // CONSTANTS diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 292842e778..56bfcafb49 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -6,13 +6,15 @@ use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::BlockNumber; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockInputs, BlockNumber}; use miden_protocol::note::Nullifier; use tonic::{Request, Response, Status}; use tracing::{info, instrument}; 
-use crate::COMPONENT; +use crate::errors::GetBlockInputsError; use crate::state::State; +use crate::{BlockProver, COMPONENT}; // STORE API // ================================================================================================ @@ -20,6 +22,7 @@ use crate::state::State; #[derive(Clone)] pub struct StoreApi { pub(super) state: Arc, + pub(super) block_prover: Arc, } impl StoreApi { @@ -43,6 +46,40 @@ impl StoreApi { mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), })) } + + /// Retrieves block inputs from state based on the contents of the supplied ordered batches. + pub(crate) async fn block_inputs_from_ordered_batches( + &self, + batches: &OrderedBatches, + ) -> Result { + // Construct fields required to retrieve block inputs. + let mut account_ids = BTreeSet::new(); + let mut nullifiers = Vec::new(); + let mut unauthenticated_note_commitments = BTreeSet::new(); + let mut reference_blocks = BTreeSet::new(); + + for batch in batches.as_slice() { + account_ids.extend(batch.updated_accounts()); + nullifiers.extend(batch.created_nullifiers()); + reference_blocks.insert(batch.reference_block_num()); + + for note in batch.input_notes().iter() { + if let Some(header) = note.header() { + unauthenticated_note_commitments.insert(header.commitment()); + } + } + } + + // Retrieve block inputs from the store. 
+ self.state + .get_block_inputs( + account_ids.into_iter().collect(), + nullifiers, + unauthenticated_note_commitments, + reference_blocks, + ) + .await + } } // UTILITIES @@ -138,8 +175,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(nullifiers = nullifiers.len()), + err +)] pub fn validate_nullifiers(nullifiers: &[proto::primitives::Digest]) -> Result, E> where E: From + std::fmt::Display, @@ -152,8 +194,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(notes = notes.len()), + err +)] pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result, Status> { notes .iter() @@ -162,7 +209,12 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< .map_err(|_| invalid_argument("Digest field is not in the modulus range")) } -#[instrument(level = "debug",target = COMPONENT, skip_all)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(block_numbers = block_numbers.len()) +)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { BTreeSet::from_iter(block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number))) } diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 9dd2b39c4d..25f6b05f60 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,12 +1,16 @@ use std::convert::Infallible; +use futures::TryFutureExt; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use 
miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; -use miden_protocol::block::{BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockBody, BlockHeader, BlockNumber, SignedBlock}; use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; use tracing::Instrument; @@ -40,33 +44,69 @@ impl block_producer_server::BlockProducer for StoreApi { /// Updates the local DB by inserting a new block header and the related data. async fn apply_block( &self, - request: Request, + request: Request, ) -> Result, Status> { let request = request.into_inner(); - - let block = ProvenBlock::read_from_bytes(&request.block).map_err(|err| { - Status::invalid_argument(err.as_report_context("block deserialization error")) - })?; + // Read ordered batches. + let ordered_batches = + OrderedBatches::read_from_bytes(&request.ordered_batches).map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to deserialize ordered batches"), + ) + })?; + // Read block. + let block = request + .block + .ok_or(proto::store::ApplyBlockRequest::missing_field(stringify!(block)))?; + // Read block header. + let header: BlockHeader = block + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + // Read block body. + let body: BlockBody = block + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + // Read signature. + let signature: Signature = block + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + // Get block inputs from ordered batches. 
+ let block_inputs = + self.block_inputs_from_ordered_batches(&ordered_batches).await.map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to get block inputs from ordered batches"), + ) + })?; let span = tracing::Span::current(); - span.set_attribute("block.number", block.header().block_num()); - span.set_attribute("block.commitment", block.header().commitment()); - span.set_attribute("block.accounts.count", block.body().updated_accounts().len()); - span.set_attribute("block.output_notes.count", block.body().output_notes().count()); - span.set_attribute("block.nullifiers.count", block.body().created_nullifiers().len()); - - // We perform the apply_block work in a separate task. This prevents the caller cancelling - // the request and thereby cancelling the task at an arbitrary point of execution. + span.set_attribute("block.number", header.block_num()); + span.set_attribute("block.commitment", header.commitment()); + span.set_attribute("block.accounts.count", body.updated_accounts().len()); + span.set_attribute("block.output_notes.count", body.output_notes().count()); + span.set_attribute("block.nullifiers.count", body.created_nullifiers().len()); + + // We perform the apply/prove block work in a separate task. This prevents the caller + // cancelling the request and thereby cancelling the task at an arbitrary point of + // execution. // // Normally this shouldn't be a problem, however our apply_block isn't quite ACID compliant // so things get a bit messy. This is more a temporary hack-around to minimize this risk. let this = self.clone(); - tokio::spawn( + // TODO(sergerad): Use block proof. + let _block_proof = tokio::spawn( async move { + // SAFETY: The header, body, and signature are assumed to + // correspond to each other because they are provided by the Block + // Producer. + let signed_block = SignedBlock::new_unchecked(header.clone(), body, signature); // TODO(sergerad): Use `SignedBlock::new()` when available. 
+ // Note: This is an internal endpoint, so its safe to expose the full error + // report. this.state - .apply_block(block) - .await - .map(Response::new) + .apply_block(signed_block) .inspect_err(|err| { span.set_error(err); }) @@ -75,11 +115,15 @@ impl block_producer_server::BlockProducer for StoreApi { ApplyBlockError::InvalidBlockError(_) => tonic::Code::InvalidArgument, _ => tonic::Code::Internal, }; - - // This is an internal endpoint, so its safe to expose the full error - // report. Status::new(code, err.as_report()) }) + .and_then(|_| { + this.block_prover + .prove(ordered_batches, block_inputs, &header) + .map_err(|err| Status::new(tonic::Code::Internal, err.as_report())) + }) + .await + .map(Response::new) } .in_current_span(), ) @@ -87,7 +131,8 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| { tonic::Status::internal(err.as_report_context("joining apply_block task failed")) }) - .flatten() + .flatten()?; + Ok(Response::new(())) } /// Returns data needed by the block producer to construct and prove the next block. 
diff --git a/crates/store/src/server/block_prover_client.rs b/crates/store/src/server/block_prover_client.rs new file mode 100644 index 0000000000..5af15ac433 --- /dev/null +++ b/crates/store/src/server/block_prover_client.rs @@ -0,0 +1,55 @@ +use miden_block_prover::{BlockProverError, LocalBlockProver}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof}; +use miden_remote_prover_client::{RemoteBlockProver, RemoteProverClientError}; +use tracing::instrument; + +use crate::COMPONENT; + +#[derive(Debug, thiserror::Error)] +pub enum StoreProverError { + #[error("local proving failed")] + LocalProvingFailed(#[source] BlockProverError), + #[error("remote proving failed")] + RemoteProvingFailed(#[source] RemoteProverClientError), +} + +// BLOCK PROVER +// ================================================================================================ + +/// Block prover which allows for proving via either local or remote backend. +/// +/// The local proving variant is intended for development and testing purposes. +/// The remote proving variant is intended for production use. 
+pub enum BlockProver { + Local(LocalBlockProver), + Remote(RemoteBlockProver), +} + +impl BlockProver { + pub fn local() -> Self { + Self::Local(LocalBlockProver::new(0)) + } + + pub fn remote(endpoint: impl Into) -> Self { + Self::Remote(RemoteBlockProver::new(endpoint)) + } + + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn prove( + &self, + tx_batches: OrderedBatches, + block_inputs: BlockInputs, + block_header: &BlockHeader, + ) -> Result { + match self { + Self::Local(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .map_err(StoreProverError::LocalProvingFailed)?), + Self::Remote(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .await + .map_err(StoreProverError::RemoteProvingFailed)?), + } + } +} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index b4b5798db9..8c828f1166 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -11,22 +11,24 @@ use miden_node_proto_build::{ store_rpc_api_descriptor, }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; +use miden_node_utils::signer::BlockSigner; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::block::BlockSigner; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; use tracing::{info, instrument}; +use url::Url; use crate::blocks::BlockStore; use crate::db::Db; use crate::errors::ApplyBlockError; use crate::state::State; -use crate::{COMPONENT, GenesisState}; +use crate::{BlockProver, COMPONENT, GenesisState}; mod api; mod block_producer; +pub mod block_prover_client; mod ntx_builder; mod rpc_api; @@ -35,6 +37,8 @@ pub struct Store { pub rpc_listener: TcpListener, pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, + /// URL for the Block Prover client. Uses local prover if `None`. 
+ pub block_prover_url: Option, pub data_directory: PathBuf, /// Server-side timeout for an individual gRPC request. /// @@ -50,12 +54,13 @@ impl Store { skip_all, err, )] - pub fn bootstrap( + pub async fn bootstrap( genesis: GenesisState, data_directory: &Path, ) -> anyhow::Result<()> { let genesis = genesis .into_block() + .await .context("failed to convert genesis configuration into the genesis block")?; let data_directory = @@ -100,14 +105,25 @@ impl Store { .context("failed to load state")?, ); - let rpc_service = - store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + // Initialize local or remote block prover. + let block_prover = if let Some(url) = self.block_prover_url { + Arc::new(BlockProver::remote(url)) + } else { + Arc::new(BlockProver::local()) + }; + + let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { + state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), + }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let block_producer_service = store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index a0fefa0e7a..f6e8d4a7a5 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -7,7 +7,7 @@ use miden_node_proto::generated as proto; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::store::ntx_builder_server; use miden_node_utils::ErrorReport; -use miden_protocol::account::StorageSlotName; +use miden_protocol::account::{StorageMapKey, StorageSlotName}; use miden_protocol::asset::AssetVaultKey; 
use miden_protocol::block::BlockNumber; use miden_protocol::note::Note; @@ -16,7 +16,12 @@ use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::errors::{ + GetAccountError, + GetNetworkAccountIdsError, + GetNoteScriptByRootError, + GetWitnessesError, +}; use crate::server::api::{ StoreApi, internal_error, @@ -167,7 +172,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let proof = self.state.get_account(account_request).await?; @@ -176,11 +181,12 @@ impl ntx_builder_server::NtxBuilder for StoreApi { async fn get_note_script_by_root( &self, - request: Request, + request: Request, ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); - let root = read_root::(request.into_inner().root, "NoteRoot")?; + let root = + read_root::(request.into_inner().root, "NoteScriptRoot")?; let note_script = self .state @@ -256,8 +262,9 @@ impl ntx_builder_server::NtxBuilder for StoreApi { read_account_id::(request.account_id).map_err(invalid_argument)?; // Read the map key. - let map_key = - read_root::(request.map_key, "MapKey").map_err(invalid_argument)?; + let map_key = read_root::(request.map_key, "MapKey") + .map(StorageMapKey::new) + .map_err(invalid_argument)?; // Read the slot name. 
let slot_name = StorageSlotName::new(request.slot_name).map_err(|err| { diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 67ef1df78e..bb3098fffa 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,4 +1,6 @@ use miden_node_proto::convert; +use miden_node_proto::domain::block::InvalidBlockRange; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::limiter::{ @@ -10,6 +12,7 @@ use miden_node_utils::limiter::{ }; use miden_protocol::Word; use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; use tracing::{debug, info}; @@ -17,12 +20,14 @@ use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ CheckNullifiersError, + GetAccountError, GetBlockByNumberError, GetNoteScriptByRootError, GetNotesByIdError, NoteSyncError, SyncAccountStorageMapsError, SyncAccountVaultError, + SyncChainMmrError, SyncNullifiersError, SyncTransactionsError, }; @@ -117,54 +122,6 @@ impl rpc_server::Rpc for StoreApi { })) } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects the client is interested in. 
- async fn sync_state( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - - let account_ids: Vec = read_account_ids::(&request.account_ids)?; - - let (state, delta) = self - .state - .sync_state(request.block_num.into(), account_ids, request.note_tags) - .await - .map_err(internal_error)?; - - let accounts = state - .account_updates - .into_iter() - .map(|account_info| proto::account::AccountSummary { - account_id: Some(account_info.account_id.into()), - account_commitment: Some(account_info.account_commitment.into()), - block_num: account_info.block_num.as_u32(), - }) - .collect(); - - let transactions = state - .transactions - .into_iter() - .map(|transaction_summary| proto::transaction::TransactionSummary { - account_id: Some(transaction_summary.account_id.into()), - block_num: transaction_summary.block_num.as_u32(), - transaction_id: Some(transaction_summary.transaction_id.into()), - }) - .collect(); - - let notes = state.notes.into_iter().map(Into::into).collect(); - - Ok(Response::new(proto::rpc::SyncStateResponse { - chain_tip: self.state.latest_block_num().await.as_u32(), - block_header: Some(state.block_header.into()), - mmr_delta: Some(delta.into()), - accounts, - transactions, - notes, - })) - } - /// Returns info which can be used by the client to sync note state. async fn sync_notes( &self, @@ -196,6 +153,45 @@ impl rpc_server::Rpc for StoreApi { })) } + /// Returns chain MMR updates within a block range. 
+ async fn sync_chain_mmr( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let chain_tip = self.state.latest_block_num().await; + + let block_range = request + .block_range + .ok_or_else(|| proto::rpc::SyncChainMmrRequest::missing_field(stringify!(block_range))) + .map_err(SyncChainMmrError::DeserializationFailed)?; + + let block_from = BlockNumber::from(block_range.block_from); + if block_from > chain_tip { + Err(SyncChainMmrError::FutureBlock { chain_tip, block_from })?; + } + + let block_to = block_range.block_to.map_or(chain_tip, BlockNumber::from).min(chain_tip); + + if block_from > block_to { + Err(SyncChainMmrError::InvalidBlockRange(InvalidBlockRange::StartGreaterThanEnd { + start: block_from, + end: block_to, + }))?; + } + let block_range = block_from..=block_to; + let mmr_delta = + self.state.sync_chain_mmr(block_range.clone()).await.map_err(internal_error)?; + + Ok(Response::new(proto::rpc::SyncChainMmrResponse { + block_range: Some(proto::rpc::BlockRange { + block_from: block_range.start().as_u32(), + block_to: Some(block_range.end().as_u32()), + }), + mmr_delta: Some(mmr_delta.into()), + })) + } + /// Returns a list of [`Note`]s for the specified [`NoteId`]s. 
/// /// If the list is empty or no [`Note`] matched the requested [`NoteId`] and empty list is @@ -250,7 +246,7 @@ impl rpc_server::Rpc for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let account_data = self.state.get_account(account_request).await?; @@ -327,7 +323,7 @@ impl rpc_server::Rpc for StoreApi { let storage_maps_page = self .state - .get_storage_map_sync_values(account_id, block_range) + .sync_account_storage_maps(account_id, block_range) .await .map_err(SyncAccountStorageMapsError::from)?; @@ -364,11 +360,12 @@ impl rpc_server::Rpc for StoreApi { async fn get_note_script_by_root( &self, - request: Request, + request: Request, ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); - let root = read_root::(request.into_inner().root, "NoteRoot")?; + let root = + read_root::(request.into_inner().root, "NoteScriptRoot")?; let note_script = self .state diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs new file mode 100644 index 0000000000..7949fcbeb6 --- /dev/null +++ b/crates/store/src/state/apply_block.rs @@ -0,0 +1,294 @@ +use std::sync::Arc; + +use miden_node_utils::ErrorReport; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::SignedBlock; +use miden_protocol::note::NoteDetails; +use miden_protocol::transaction::OutputNote; +use miden_protocol::utils::Serializable; +use tokio::sync::oneshot; +use tracing::{Instrument, info, info_span, instrument}; + +use crate::db::NoteRecord; +use crate::errors::{ApplyBlockError, InvalidBlockError}; +use crate::state::State; +use crate::{COMPONENT, HistoricalError}; + +impl State { + /// Apply changes of a new block to the DB and in-memory data structures. 
+ /// + /// ## Note on state consistency + /// + /// The server contains in-memory representations of the existing trees, the in-memory + /// representation must be kept consistent with the committed data, this is necessary so to + /// provide consistent results for all endpoints. In order to achieve consistency, the + /// following steps are used: + /// + /// - the request data is validated, prior to starting any modifications. + /// - block is being saved into the store in parallel with updating the DB, but before + /// committing. This block is considered as candidate and not yet available for reading + /// because the latest block pointer is not updated yet. + /// - a transaction is open in the DB and the writes are started. + /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the + /// in-memory representations, which are consistent at this stage. + /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is + /// acquired, preventing concurrent reads to the in-memory data, since that will be + /// out-of-sync w.r.t. the DB. + /// - the DB transaction is committed, and requests that read only from the DB can proceed to + /// use the fresh data. + /// - the in-memory structures are updated, including the latest block pointer and the lock is + /// released. + // TODO: This span is logged in a root span, we should connect it to the parent span. + #[expect(clippy::too_many_lines)] + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn apply_block(&self, signed_block: SignedBlock) -> Result<(), ApplyBlockError> { + let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; + + let header = signed_block.header(); + let body = signed_block.body(); + + // Validate that header and body match. 
+ let tx_commitment = body.transactions().commitment(); + if header.tx_commitment() != tx_commitment { + return Err(InvalidBlockError::InvalidBlockTxCommitment { + expected: tx_commitment, + actual: header.tx_commitment(), + } + .into()); + } + + let block_num = header.block_num(); + let block_commitment = header.commitment(); + + // Validate that the applied block is the next block in sequence. + let prev_block = self + .db + .select_block_header_by_block_num(None) + .await? + .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; + let expected_block_num = prev_block.block_num().child(); + if block_num != expected_block_num { + return Err(InvalidBlockError::NewBlockInvalidBlockNum { + expected: expected_block_num, + submitted: block_num, + } + .into()); + } + if header.prev_block_commitment() != prev_block.commitment() { + return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); + } + + // Save the block to the block store. In a case of a rolled-back DB transaction, the + // in-memory state will be unchanged, but the block might still be written into the + // block store. Thus, such block should be considered as block candidates, but not + // finalized blocks. So we should check for the latest block when getting block from + // the store. + let signed_block_bytes = signed_block.to_bytes(); + let store = Arc::clone(&self.block_store); + let block_save_task = tokio::spawn( + async move { store.save_block(block_num, &signed_block_bytes).await }.in_current_span(), + ); + + // Scope to read in-memory data, compute mutations required for updating account + // and nullifier trees, and validate the request. 
+ let ( + nullifier_tree_old_root, + nullifier_tree_update, + account_tree_old_root, + account_tree_update, + ) = { + let inner = self.inner.read().await; + + let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); + + // nullifiers can be produced only once + let duplicate_nullifiers: Vec<_> = body + .created_nullifiers() + .iter() + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) + .copied() + .collect(); + if !duplicate_nullifiers.is_empty() { + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); + } + + // compute updates for the in-memory data structures + + // new_block.chain_root must be equal to the chain MMR root prior to the update + let peaks = inner.blockchain.peaks(); + if peaks.hash_peaks() != header.chain_commitment() { + return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); + } + + // compute update for nullifier tree + let nullifier_tree_update = inner + .nullifier_tree + .compute_mutations( + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + ) + .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; + + if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. 
+ let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); + } + + // compute update for account tree + let account_tree_update = inner + .account_tree + .compute_mutations( + body.updated_accounts() + .iter() + .map(|update| (update.account_id(), update.final_state_commitment())), + ) + .map_err(|e| match e { + HistoricalError::AccountTreeError(err) => { + InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) + }, + HistoricalError::MerkleError(_) => { + panic!("Unexpected MerkleError during account tree mutation computation") + }, + })?; + + if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); + } + + ( + inner.nullifier_tree.root(), + nullifier_tree_update, + inner.account_tree.root_latest(), + account_tree_update, + ) + }; + + // Build note tree. 
+ let note_tree = body.compute_block_note_tree(); + if note_tree.root() != header.note_root() { + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); + } + + let notes = body + .output_notes() + .map(|(note_index, note)| { + let (details, nullifier) = match note { + OutputNote::Full(note) => { + (Some(NoteDetails::from(note)), Some(note.nullifier())) + }, + OutputNote::Header(_) => (None, None), + note @ OutputNote::Partial(_) => { + return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( + note.clone(), + ))); + }, + }; + + let inclusion_path = note_tree.open(note_index); + + let note_record = NoteRecord { + block_num, + note_index, + note_id: note.id().as_word(), + note_commitment: note.commitment(), + metadata: note.metadata().clone(), + details, + inclusion_path, + }; + + Ok((note_record, nullifier)) + }) + .collect::, InvalidBlockError>>()?; + + // Signals the transaction is ready to be committed, and the write lock can be acquired. + let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); + // Signals the write lock has been acquired, and the transaction can be committed. + let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + + // Extract public account updates with deltas before block is moved into async task. + // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + }, + )); + + // The DB and in-memory state updates need to be synchronized and are partially + // overlapping. Namely, the DB transaction only proceeds after this task acquires the + // in-memory write lock. This requires the DB update to run concurrently, so a new task is + // spawned. 
+ let db = Arc::clone(&self.db); + let db_update_task = tokio::spawn( + async move { db.apply_block(allow_acquire, acquire_done, signed_block, notes).await } + .in_current_span(), + ); + + // Wait for the message from the DB update task, that we ready to commit the DB transaction. + acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; + + // Awaiting the block saving task to complete without errors. + block_save_task.await??; + + // Scope to update the in-memory data. + async move { + // We need to hold the write lock here to prevent inconsistency between the in-memory + // state and the DB state. Thus, we need to wait for the DB update task to complete + // successfully. + let mut inner = self.inner.write().await; + + // We need to check that neither the nullifier tree nor the account tree have changed + // while we were waiting for the DB preparation task to complete. If either of them + // did change, we do not proceed with in-memory and database updates, since it may + // lead to an inconsistent state. + if inner.nullifier_tree.root() != nullifier_tree_old_root + || inner.account_tree.root_latest() != account_tree_old_root + { + return Err(ApplyBlockError::ConcurrentWrite); + } + + // Notify the DB update task that the write lock has been acquired, so it can commit + // the DB transaction. + inform_acquire_done + .send(()) + .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; + + // TODO: shutdown #91 + // Await for successful commit of the DB transaction. If the commit fails, we mustn't + // change in-memory state, so we return a block applying error and don't proceed with + // in-memory updates. + db_update_task + .await? 
+ .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; + + // Update the in-memory data structures after successful commit of the DB transaction + inner + .nullifier_tree + .apply_mutations(nullifier_tree_update) + .expect("Unreachable: old nullifier tree root must be checked before this step"); + inner + .account_tree + .apply_mutations(account_tree_update) + .expect("Unreachable: old account tree root must be checked before this step"); + + inner.blockchain.push(block_commitment); + + Ok(()) + } + .in_current_span() + .await?; + + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); + + Ok(()) + } +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea06313..af678899e2 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -9,26 +9,26 @@ //! data exists, otherwise rebuilt from the database and persisted. 
use std::future::Future; +use std::num::NonZeroUsize; use std::path::Path; -use miden_protocol::Word; +use miden_crypto::merkle::mmr::Mmr; +#[cfg(feature = "rocksdb")] +use miden_large_smt_backend_rocksdb::{RocksDbConfig, RocksDbStorage}; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; -use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +use miden_protocol::block::{BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +use miden_protocol::{Felt, FieldElement, Word}; #[cfg(feature = "rocksdb")] use tracing::info; use tracing::instrument; -#[cfg(feature = "rocksdb")] -use { - miden_crypto::merkle::smt::RocksDbStorage, - miden_protocol::crypto::merkle::smt::RocksDbConfig, -}; use crate::COMPONENT; use crate::db::Db; +use crate::db::models::queries::BlockHeaderCommitment; use crate::errors::{DatabaseError, StateInitializationError}; use crate::inner_forest::InnerForest; @@ -41,6 +41,18 @@ pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; /// Directory name for the nullifier tree storage within the data directory. pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; +/// Page size for loading account commitments from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of accounts. +const ACCOUNT_COMMITMENTS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading nullifiers from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of nullifiers. +const NULLIFIERS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading public account IDs from the database during forest rebuilding. 
+/// This limits memory usage when rebuilding with millions of public accounts. +const PUBLIC_ACCOUNT_IDS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(1_000).unwrap(); + // STORAGE TYPE ALIAS // ================================================================================================ @@ -66,6 +78,14 @@ pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInit } } +/// Converts a block number to the leaf value format used in the nullifier tree. +/// +/// This matches the format used by `NullifierBlock::from(BlockNumber)::into()`, +/// which is `[Felt::from(block_num), 0, 0, 0]`. +fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { + Word::from([Felt::from(block_num), Felt::ZERO, Felt::ZERO, Felt::ZERO]) +} + // STORAGE LOADER TRAIT // ================================================================================================ @@ -103,27 +123,82 @@ impl StorageLoader for MemoryStorage { Ok(MemoryStorage::default()) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + 
smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + // TODO: Make the loading methodology for account and nullifier trees consistent. + // Currently we use `NullifierTree::new_unchecked()` for nullifiers but `AccountTree::new()` + // for accounts. Consider using `NullifierTree::with_storage_from_entries()` for consistency. + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -141,6 +216,7 @@ impl StorageLoader for RocksDbStorage { .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, @@ -156,15 +232,42 @@ impl StorageLoader for 
RocksDbStorage { } info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, @@ -179,10 +282,36 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = 
db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -201,45 +330,56 @@ pub fn load_smt(storage: S) -> Result, StateInitializ /// Loads the blockchain MMR from all block headers in the database. #[instrument(target = COMPONENT, skip_all)] pub async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? - .iter() - .map(BlockHeader::commitment) - .collect(); + let block_commitments = db.select_all_block_header_commitments().await?; // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX // entries. - let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + let chain_mmr = Blockchain::from_mmr_unchecked(Mmr::from( + block_commitments.iter().copied().map(BlockHeaderCommitment::word), + )); Ok(chain_mmr) } /// Loads SMT forest with storage map and vault Merkle paths for all public accounts. 
-#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] +#[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub async fn load_smt_forest( db: &mut Db, block_num: BlockNumber, ) -> Result { use miden_protocol::account::delta::AccountDelta; - let public_account_ids = db.select_all_public_account_ids().await?; - - // Acquire write lock once for the entire initialization let mut forest = InnerForest::new(); + let mut cursor = None; - // Process each account - for account_id in public_account_ids { - // Get the full account from the database - let account_info = db.select_account(account_id).await?; - let account = account_info.details.expect("public accounts always have details in DB"); + loop { + let page = db.select_public_account_ids_paged(PUBLIC_ACCOUNT_IDS_PAGE_SIZE, cursor).await?; - // Convert the full account to a full-state delta - let delta = - AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + if page.account_ids.is_empty() { + break; + } - // Use the unified update method (will recognize it's a full-state delta) - forest.update_account(block_num, &delta)?; + // Process each account in this page + for account_id in page.account_ids { + // TODO: Loading the full account from the database is inefficient and will need to + // go away. 
+ let account_info = db.select_account(account_id).await?; + let account = account_info + .details + .ok_or(StateInitializationError::PublicAccountMissingDetails(account_id))?; + + // Convert the full account to a full-state delta + let delta = AccountDelta::try_from(account).map_err(|e| { + StateInitializationError::AccountToDeltaConversionFailed(e.to_string()) + })?; + + forest.update_account(block_num, &delta)?; + } + + cursor = page.next_cursor; + if cursor.is_none() { + break; + } } Ok(forest) diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index b584f37b4a..c19699d006 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -21,59 +21,51 @@ use miden_node_proto::domain::account::{ StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; +use miden_protocol::account::{AccountId, StorageMapKey, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; -use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; -use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_protocol::transaction::{OutputNote, PartialBlockchain}; -use miden_protocol::utils::Serializable; -use tokio::sync::{Mutex, RwLock, 
oneshot}; -use tracing::{Instrument, info, info_span, instrument}; +use miden_protocol::note::{NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{Mutex, RwLock}; +use tracing::{info, instrument}; -use crate::accounts::{AccountTreeWithHistory, HistoricalError}; +use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; -use crate::db::models::queries::StorageMapValuesPage; -use crate::db::{ - AccountVaultValue, - Db, - NoteRecord, - NoteSyncUpdate, - NullifierInfo, - StateSyncUpdate, -}; +use crate::db::{Db, NoteRecord, NullifierInfo}; use crate::errors::{ ApplyBlockError, DatabaseError, + GetAccountError, GetBatchInputsError, GetBlockHeaderError, GetBlockInputsError, GetCurrentBlockchainDataError, - InvalidBlockError, - NoteSyncError, StateInitializationError, - StateSyncError, }; use crate::inner_forest::{InnerForest, WitnessError}; use crate::{COMPONENT, DataDirectory}; mod loader; -pub use loader::{ +use loader::{ ACCOUNT_TREE_STORAGE_DIR, NULLIFIER_TREE_STORAGE_DIR, StorageLoader, TreeStorage, + load_mmr, + load_smt_forest, + verify_tree_consistency, }; -use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; + +mod apply_block; +mod sync_state; // STRUCTURES // ================================================================================================ @@ -190,294 +182,6 @@ impl State { }) } - // STATE MUTATOR - // -------------------------------------------------------------------------------------------- - - /// Apply changes of a new block to the DB and in-memory data structures. - /// - /// ## Note on state consistency - /// - /// The server contains in-memory representations of the existing trees, the in-memory - /// representation must be kept consistent with the committed data, this is necessary so to - /// provide consistent results for all endpoints. 
In order to achieve consistency, the - /// following steps are used: - /// - /// - the request data is validated, prior to starting any modifications. - /// - block is being saved into the store in parallel with updating the DB, but before - /// committing. This block is considered as candidate and not yet available for reading - /// because the latest block pointer is not updated yet. - /// - a transaction is open in the DB and the writes are started. - /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the - /// in-memory representations, which are consistent at this stage. - /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is - /// acquired, preventing concurrent reads to the in-memory data, since that will be - /// out-of-sync w.r.t. the DB. - /// - the DB transaction is committed, and requests that read only from the DB can proceed to - /// use the fresh data. - /// - the in-memory structures are updated, including the latest block pointer and the lock is - /// released. - // TODO: This span is logged in a root span, we should connect it to the parent span. - #[allow(clippy::too_many_lines)] - #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { - let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); - - if header.tx_commitment() != tx_commitment { - return Err(InvalidBlockError::InvalidBlockTxCommitment { - expected: tx_commitment, - actual: header.tx_commitment(), - } - .into()); - } - - let block_num = header.block_num(); - let block_commitment = header.commitment(); - - // ensures the right block header is being processed - let prev_block = self - .db - .select_block_header_by_block_num(None) - .await? 
- .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - - let expected_block_num = prev_block.block_num().child(); - if block_num != expected_block_num { - return Err(InvalidBlockError::NewBlockInvalidBlockNum { - expected: expected_block_num, - submitted: block_num, - } - .into()); - } - if header.prev_block_commitment() != prev_block.commitment() { - return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); - } - - let block_data = block.to_bytes(); - - // Save the block to the block store. In a case of a rolled-back DB transaction, the - // in-memory state will be unchanged, but the block might still be written into the - // block store. Thus, such block should be considered as block candidates, but not - // finalized blocks. So we should check for the latest block when getting block from - // the store. - let store = Arc::clone(&self.block_store); - let block_save_task = tokio::spawn( - async move { store.save_block(block_num, &block_data).await }.in_current_span(), - ); - - // scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request - let ( - nullifier_tree_old_root, - nullifier_tree_update, - account_tree_old_root, - account_tree_update, - ) = { - let inner = self.inner.read().await; - - let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); - - // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() - .created_nullifiers() - .iter() - .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) - .copied() - .collect(); - if !duplicate_nullifiers.is_empty() { - return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); - } - - // compute updates for the in-memory data structures - - // new_block.chain_root must be equal to the chain MMR root prior to the update - let peaks = inner.blockchain.peaks(); - if peaks.hash_peaks() != header.chain_commitment() { - return 
Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); - } - - // compute update for nullifier tree - let nullifier_tree_update = inner - .nullifier_tree - .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), - ) - .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; - - if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { - // We do our best here to notify the serve routine, if it doesn't care (dropped the - // receiver) we can't do much. - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidNullifierRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); - } - - // compute update for account tree - let account_tree_update = inner - .account_tree - .compute_mutations( - block - .body() - .updated_accounts() - .iter() - .map(|update| (update.account_id(), update.final_state_commitment())), - ) - .map_err(|e| match e { - HistoricalError::AccountTreeError(err) => { - InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) - }, - HistoricalError::MerkleError(_) => { - panic!("Unexpected MerkleError during account tree mutation computation") - }, - })?; - - if account_tree_update.as_mutation_set().root() != header.account_root() { - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidAccountRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); - } - - ( - inner.nullifier_tree.root(), - nullifier_tree_update, - inner.account_tree.root_latest(), - account_tree_update, - ) - }; - - // build note tree - let note_tree = block.body().compute_block_note_tree(); - if note_tree.root() != header.note_root() { - return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); - } - - let notes = block - .body() - .output_notes() - .map(|(note_index, note)| { - let (details, nullifier) = match note { - 
OutputNote::Full(note) => { - (Some(NoteDetails::from(note)), Some(note.nullifier())) - }, - OutputNote::Header(_) => (None, None), - note @ OutputNote::Partial(_) => { - return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( - note.clone(), - ))); - }, - }; - - let inclusion_path = note_tree.open(note_index); - - let note_record = NoteRecord { - block_num, - note_index, - note_id: note.id().as_word(), - note_commitment: note.commitment(), - metadata: note.metadata().clone(), - details, - inclusion_path, - }; - - Ok((note_record, nullifier)) - }) - .collect::, InvalidBlockError>>()?; - - // Signals the transaction is ready to be committed, and the write lock can be acquired - let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); - // Signals the write lock has been acquired, and the transaction can be committed - let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); - - // Extract public account updates with deltas before block is moved into async task. - // Private accounts are filtered out since they don't expose their state changes. - let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { - AccountUpdateDetails::Delta(delta) => Some(delta.clone()), - AccountUpdateDetails::Private => None, - } - })); - - // The DB and in-memory state updates need to be synchronized and are partially - // overlapping. Namely, the DB transaction only proceeds after this task acquires the - // in-memory write lock. This requires the DB update to run concurrently, so a new task is - // spawned. 
- let db = Arc::clone(&self.db); - let db_update_task = tokio::spawn( - async move { db.apply_block(allow_acquire, acquire_done, block, notes).await } - .in_current_span(), - ); - - // Wait for the message from the DB update task, that we ready to commit the DB transaction - acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; - - // Awaiting the block saving task to complete without errors - block_save_task.await??; - - // Scope to update the in-memory data - async move { - // We need to hold the write lock here to prevent inconsistency between the in-memory - // state and the DB state. Thus, we need to wait for the DB update task to complete - // successfully. - let mut inner = self.inner.write().await; - - // We need to check that neither the nullifier tree nor the account tree have changed - // while we were waiting for the DB preparation task to complete. If either of them - // did change, we do not proceed with in-memory and database updates, since it may - // lead to an inconsistent state. - if inner.nullifier_tree.root() != nullifier_tree_old_root - || inner.account_tree.root_latest() != account_tree_old_root - { - return Err(ApplyBlockError::ConcurrentWrite); - } - - // Notify the DB update task that the write lock has been acquired, so it can commit - // the DB transaction - inform_acquire_done - .send(()) - .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; - - // TODO: shutdown #91 - // Await for successful commit of the DB transaction. If the commit fails, we mustn't - // change in-memory state, so we return a block applying error and don't proceed with - // in-memory updates. - db_update_task - .await? 
- .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; - - // Update the in-memory data structures after successful commit of the DB transaction - inner - .nullifier_tree - .apply_mutations(nullifier_tree_update) - .expect("Unreachable: old nullifier tree root must be checked before this step"); - inner - .account_tree - .apply_mutations(account_tree_update) - .expect("Unreachable: old account tree root must be checked before this step"); - inner.blockchain.push(block_commitment); - - Ok(()) - } - .in_current_span() - .await?; - - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; - - info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); - - Ok(()) - } - // STATE ACCESSORS // -------------------------------------------------------------------------------------------- @@ -506,17 +210,6 @@ impl State { } } - pub async fn sync_nullifiers( - &self, - prefix_len: u32, - nullifier_prefixes: Vec, - block_range: RangeInclusive, - ) -> Result<(Vec, BlockNumber), DatabaseError> { - self.db - .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) - .await - } - /// Generates membership proofs for each one of the `nullifiers` against the latest nullifier /// tree. /// @@ -689,85 +382,6 @@ impl State { }) } - /// Loads data to synchronize a client. - /// - /// The client's request contains a list of note tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filtered based on this - /// block range. - /// - /// # Arguments - /// - /// - `block_num`: The last block *known* by the client, updates start from the next block. - /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's - /// block range. - /// - `note_tags`: The tags the client is interested in, result is restricted to the first block - /// with any matches tags. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_state( - &self, - block_num: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { - let inner = self.inner.read().await; - - let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; - - let delta = if block_num == state_sync.block_header.block_num() { - // The client is in sync with the chain tip. - MmrDelta { - forest: Forest::new(block_num.as_usize()), - data: vec![], - } - } else { - // Important notes about the boundary conditions: - // - // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root - // contained in the block header always lag behind by one block, this is because the Mmr - // leaves are hashes of block headers, and we can't have self-referential hashes. These - // two points cancel out and don't require adjusting. - // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to - // be - // exclusive, so the from_forest has to be adjusted with a +1 - let from_forest = (block_num + 1).as_usize(); - let to_forest = state_sync.block_header.block_num().as_usize(); - inner - .blockchain - .as_mmr() - .get_delta(Forest::new(from_forest), Forest::new(to_forest)) - .map_err(StateSyncError::FailedToBuildMmrDelta)? - }; - - Ok((state_sync, delta)) - } - - /// Loads data to synchronize a client's notes. - /// - /// The client's request contains a list of tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this - /// block range. - /// - /// # Arguments - /// - /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the - /// first block containing a matching note. - /// - `block_range`: The range of blocks from which to synchronize notes. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_notes( - &self, - note_tags: Vec, - block_range: RangeInclusive, - ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { - let inner = self.inner.read().await; - - let (note_sync, last_included_block) = - self.db.get_note_sync(block_range, note_tags).await?; - - let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; - - Ok((note_sync, mmr_proof, last_included_block)) - } - /// Returns data needed by the block producer to construct and prove the next block. pub async fn get_block_inputs( &self, @@ -996,11 +610,11 @@ impl State { pub async fn get_account( &self, account_request: AccountRequest, - ) -> Result { + ) -> Result { let AccountRequest { block_num, account_id, details } = account_request; if details.is_some() && !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; @@ -1022,19 +636,20 @@ impl State { &self, block_num: Option, account_id: AccountId, - ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { + ) -> Result<(BlockNumber, AccountWitness), GetAccountError> { let inner_state = self.inner.read().await; // Determine which block to query let (block_num, witness) = if let Some(requested_block) = block_num { // Historical query: use the account tree with history - let witness = inner_state - .account_tree - .open_at(account_id, requested_block) - .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { - block_num: requested_block, - reason: "Block is either in the future or has been pruned from history" - .to_string(), + let witness = + inner_state.account_tree.open_at(account_id, requested_block).ok_or_else(|| { + let latest_block = inner_state.account_tree.block_number_latest(); + if requested_block > latest_block { + 
GetAccountError::UnknownBlock(requested_block) + } else { + GetAccountError::BlockPruned(requested_block) + } })?; (requested_block, witness) } else { @@ -1061,7 +676,7 @@ impl State { account_id: AccountId, block_num: BlockNumber, detail_request: AccountDetailRequest, - ) -> Result { + ) -> Result { let AccountDetailRequest { code_commitment, asset_vault_commitment, @@ -1069,18 +684,25 @@ impl State { } = detail_request; if !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } // Validate block exists in the blockchain before querying the database - self.validate_block_exists(block_num).await?; + { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(GetAccountError::UnknownBlock(block_num)); + } + } // Query account header and storage header together in a single DB call let (account_header, storage_header) = self .db .select_account_header_with_storage_header_at_block(account_id, block_num) .await? - .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + .ok_or(GetAccountError::AccountNotFound(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -1143,15 +765,6 @@ impl State { }) } - /// Returns storage map values for syncing within a block range. - pub(crate) async fn get_storage_map_sync_values( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await - } - /// Loads a block from the block store. Return `Ok(None)` if the block is not found. 
pub async fn load_block( &self, @@ -1168,39 +781,11 @@ impl State { self.inner.read().await.latest_block_num() } - /// Validates that a block exists in the blockchain - /// - /// # Attention - /// - /// Acquires a *read lock** on `self.inner`. - /// - /// # Errors - /// - /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. - async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { - let inner = self.inner.read().await; - let latest_block_num = inner.latest_block_num(); - - if block_num > latest_block_num { - return Err(DatabaseError::BlockNotFound(block_num)); - } - - Ok(()) - } - /// Emits metrics for each database table's size. pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { self.db.analyze_table_sizes().await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.get_account_vault_sync(account_id, block_range).await - } /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( @@ -1220,16 +805,6 @@ impl State { self.db.select_note_script_by_root(root).await } - /// Returns the complete transaction records for the specified accounts within the specified - /// block range, including state commitments and note IDs. - pub async fn sync_transactions( - &self, - account_ids: Vec, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.select_transactions_records(account_ids, block_range).await - } - /// Returns vault asset witnesses for the specified account and block number. 
pub async fn get_vault_asset_witnesses( &self, @@ -1255,7 +830,7 @@ impl State { account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, - raw_key: Word, + raw_key: StorageMapKey, ) -> Result { let witness = self .forest diff --git a/crates/store/src/state/sync_state.rs b/crates/store/src/state/sync_state.rs new file mode 100644 index 0000000000..6568f31e61 --- /dev/null +++ b/crates/store/src/state/sync_state.rs @@ -0,0 +1,123 @@ +use std::ops::RangeInclusive; + +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrProof}; +use tracing::instrument; + +use super::State; +use crate::COMPONENT; +use crate::db::models::queries::StorageMapValuesPage; +use crate::db::{AccountVaultValue, NoteSyncUpdate, NullifierInfo}; +use crate::errors::{DatabaseError, NoteSyncError, StateSyncError}; + +// STATE SYNCHRONIZATION ENDPOINTS +// ================================================================================================ + +impl State { + /// Returns the complete transaction records for the specified accounts within the specified + /// block range, including state commitments and note IDs. + pub async fn sync_transactions( + &self, + account_ids: Vec, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.select_transactions_records(account_ids, block_range).await + } + + /// Returns the chain MMR delta for the specified block range. 
+ #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_chain_mmr( + &self, + block_range: RangeInclusive, + ) -> Result { + let inner = self.inner.read().await; + + let block_from = *block_range.start(); + let block_to = *block_range.end(); + + if block_from == block_to { + return Ok(MmrDelta { + forest: Forest::new(block_from.as_usize()), + data: vec![], + }); + } + + // Important notes about the boundary conditions: + // + // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root + // contained in the block header always lag behind by one block, this is because the Mmr + // leaves are hashes of block headers, and we can't have self-referential hashes. These + // two points cancel out and don't require adjusting. + // - Mmr::get_delta is inclusive, whereas the sync request block_from is defined to be the + // last block already present in the caller's MMR. The delta should therefore start at the + // next block, so the from_forest has to be adjusted with a +1. + let from_forest = (block_from + 1).as_usize(); + let to_forest = block_to.as_usize(); + + inner + .blockchain + .as_mmr() + .get_delta(Forest::new(from_forest), Forest::new(to_forest)) + .map_err(StateSyncError::FailedToBuildMmrDelta) + } + + /// Loads data to synchronize a client's notes. + /// + /// The client's request contains a list of tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// block range. + /// + /// # Arguments + /// + /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the + /// first block containing a matching note. + /// - `block_range`: The range of blocks from which to synchronize notes. 
+ #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_notes( + &self, + note_tags: Vec, + block_range: RangeInclusive, + ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { + let inner = self.inner.read().await; + + let (note_sync, last_included_block) = + self.db.get_note_sync(block_range, note_tags).await?; + + let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; + + Ok((note_sync, mmr_proof, last_included_block)) + } + + pub async fn sync_nullifiers( + &self, + prefix_len: u32, + nullifier_prefixes: Vec, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db + .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) + .await + } + + // ACCOUNT STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.get_account_vault_sync(account_id, block_range).await + } + + /// Returns storage map values for syncing within a block range. 
+ pub async fn sync_account_storage_maps( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result { + self.db.select_storage_map_sync_values(account_id, block_range).await + } +} diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index e61930937e..f2817c6049 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -21,7 +21,6 @@ testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } bytes = { version = "1.10" } -figment = { features = ["env", "toml"], version = "0.10" } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } @@ -31,16 +30,18 @@ opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } -serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } tokio = { workspace = true } tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } -tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } +tracing-forest = { features = ["chrono"], optional = true, version = "0.3" } tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/build.rs b/crates/utils/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/crates/utils/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/crates/utils/src/config.rs b/crates/utils/src/config.rs deleted file mode 100644 index e0fc1a0a6b..0000000000 --- a/crates/utils/src/config.rs +++ /dev/null @@ 
-1,23 +0,0 @@ -use std::path::Path; - -use figment::Figment; -use figment::providers::{Format, Toml}; -use serde::Deserialize; - -pub const DEFAULT_NODE_RPC_PORT: u16 = 57291; -pub const DEFAULT_BLOCK_PRODUCER_PORT: u16 = 48046; -pub const DEFAULT_STORE_PORT: u16 = 28943; -pub const DEFAULT_FAUCET_SERVER_PORT: u16 = 8080; - -/// Loads the user configuration. -/// -/// This function will look for the configuration file at the provided path. If the path is -/// relative, searches in parent directories all the way to the root as well. -/// -/// The above configuration options are indented to support easy of packaging and deployment. -#[allow(clippy::result_large_err, reason = "This error crashes the node")] -pub fn load_config Deserialize<'a>>( - config_file: impl AsRef, -) -> figment::Result { - Figment::from(Toml::file(config_file.as_ref())).extract() -} diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 530e971e49..af86ccbb99 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -1,4 +1,3 @@ -pub mod config; pub mod cors; pub mod crypto; #[cfg(feature = "testing")] @@ -9,6 +8,7 @@ pub mod limiter; pub mod logging; pub mod lru_cache; pub mod panic; +pub mod signer; pub mod tracing; pub trait ErrorReport: std::error::Error { diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 1adf5be411..993b3be689 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -13,7 +13,7 @@ /// Basic request limit. pub const GENERAL_REQUEST_LIMIT: usize = 1000; -#[allow(missing_docs)] +#[expect(missing_docs)] #[derive(Debug, thiserror::Error)] #[error("parameter {which} exceeded limit {limit}: {size}")] pub struct QueryLimitError { @@ -46,21 +46,21 @@ pub trait QueryParamLimiter { /// store. 
pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; -/// Used for the following RPC endpoints -/// * `state_sync` +/// Used for the following RPC endpoints: +/// * `sync_transactions` /// /// Capped at 1000 account IDs to keep SQL `IN` clauses bounded and response payloads under the -/// 4 MB budget. +/// 4 MB budget. pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// -/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload +/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload /// budget and to avoid unbounded prefix scans. pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { @@ -68,12 +68,11 @@ impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// * `sync_nullifiers` -/// * `sync_state` /// -/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. +/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; @@ -83,7 +82,7 @@ impl QueryParamLimiter for QueryParamNullifierLimit { /// Used for the following RPC endpoints /// * `get_note_sync` /// -/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. +/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. 
pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; @@ -103,7 +102,7 @@ impl QueryParamLimiter for QueryParamNoteIdLimit { /// Used for internal queries retrieving note inclusion proofs by commitment. /// -/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB +/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB /// payload cap. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { @@ -114,7 +113,7 @@ impl QueryParamLimiter for QueryParamNoteCommitmentLimit { /// Only used internally, not exposed via public RPC. /// /// Capped at 1000 block headers to bound internal batch operations and keep payloads below the -/// 4 MB limit. +/// 4 MB limit. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; diff --git a/crates/utils/src/logging.rs b/crates/utils/src/logging.rs index 6593943f42..5893650303 100644 --- a/crates/utils/src/logging.rs +++ b/crates/utils/src/logging.rs @@ -10,6 +10,8 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::layer::{Filter, SubscriberExt}; use tracing_subscriber::{Layer, Registry}; +use crate::tracing::OpenTelemetrySpanExt; + /// Global tracer provider for flushing traces on panic. /// /// This is necessary because the panic hook needs access to the tracer provider to flush @@ -89,7 +91,12 @@ pub fn setup_tracing(otel: OpenTelemetry) -> anyhow::Result> { // This chains with the default panic hook to preserve backtrace printing. let default_hook = std::panic::take_hook(); std::panic::set_hook(Box::new(move |info| { - tracing::error!(panic = true, "{info}"); + tracing::error!(panic = true, info = %info, "panic"); + + // Mark the current span as failed for OpenTelemetry. 
+ let info_str = info.to_string(); + let wrapped = anyhow::Error::msg(info_str); + tracing::Span::current().set_error(wrapped.as_ref()); // Flush traces before the program terminates. // This ensures the panic trace is exported even though the OtelGuard won't be dropped. diff --git a/crates/utils/src/panic.rs b/crates/utils/src/panic.rs index 1b899ee618..c330fe362a 100644 --- a/crates/utils/src/panic.rs +++ b/crates/utils/src/panic.rs @@ -4,14 +4,21 @@ use http::{Response, StatusCode, header}; use http_body_util::Full; pub use tower_http::catch_panic::CatchPanicLayer; +use crate::tracing::OpenTelemetrySpanExt; + /// Custom callback that is used by Tower to fulfill the /// [`tower_http::catch_panic::ResponseForPanic`] trait. /// /// This should be added to tonic server builder as a layer via [`CatchPanicLayer::custom()`]. +#[track_caller] pub fn catch_panic_layer_fn(err: Box) -> Response> { // Log the panic error details. let err = stringify_panic_error(err); - tracing::error!(panic = true, "{err}"); + tracing::error!(panic = true, error = %err, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let wrapped = anyhow::Error::msg(err.clone()); + tracing::Span::current().set_error(wrapped.as_ref()); // Return generic error response. Response::builder() diff --git a/crates/utils/src/signer.rs b/crates/utils/src/signer.rs new file mode 100644 index 0000000000..00dbe3ebc3 --- /dev/null +++ b/crates/utils/src/signer.rs @@ -0,0 +1,36 @@ +use core::convert::Infallible; +use core::error; + +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature}; + +// BLOCK SIGNER +// ================================================================================================ + +/// Trait which abstracts the signing of block headers with ECDSA signatures. +/// +/// Production-level implementations will involve some sort of secure remote backend. 
The trait also +/// allows for testing with local and ephemeral signers. +pub trait BlockSigner { + type Error: error::Error + Send + Sync + 'static; + fn sign( + &self, + header: &BlockHeader, + ) -> impl Future> + Send; + fn public_key(&self) -> PublicKey; +} + +// SECRET KEY BLOCK SIGNER +// ================================================================================================ + +impl BlockSigner for SecretKey { + type Error = Infallible; + + async fn sign(&self, header: &BlockHeader) -> Result { + Ok(self.sign(header.commitment())) + } + + fn public_key(&self) -> PublicKey { + self.public_key() + } +} diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index f5d0951bfa..985a2e4ba8 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -9,6 +9,7 @@ use crate::tracing::OpenTelemetrySpanExt; /// The span name is dynamically set using the HTTP path via the `otel.name` field. /// Additionally also pulls in remote tracing context which allows the server trace to be connected /// to the client's origin trace. +#[track_caller] pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { // A gRPC request's path ends with `..//`. 
let mut path_segments = request.uri().path().rsplit('/'); diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 6115e7cff3..84ed6b2484 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,6 +18,11 @@ workspace = true [dependencies] anyhow = { workspace = true } +aws-config = { version = "1.8.14" } +aws-sdk-kms = { version = "1.100" } +diesel = { workspace = true } +diesel_migrations = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } @@ -31,4 +36,7 @@ tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +[build-dependencies] +build-rs = { workspace = true } + [dev-dependencies] diff --git a/crates/validator/build.rs b/crates/validator/build.rs new file mode 100644 index 0000000000..59c416fafe --- /dev/null +++ b/crates/validator/build.rs @@ -0,0 +1,10 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `validator/src/db/migrations.rs` to include the latest version of the migrations into the binary, +// see . +fn main() { + build_rs::output::rerun_if_changed("./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + build_rs::output::rerun_if_changed("Cargo.toml"); +} diff --git a/crates/validator/diesel.toml b/crates/validator/diesel.toml new file mode 100644 index 0000000000..bdce9175fa --- /dev/null +++ b/crates/validator/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index c1cab190bd..97b61fabcb 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,22 +1,26 @@ -use std::sync::Arc; - -use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; +use miden_node_db::{DatabaseError, Db}; +use miden_protocol::block::ProposedBlock; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; -use miden_protocol::transaction::TransactionId; -use tracing::{Instrument, info_span}; +use miden_protocol::transaction::{TransactionHeader, TransactionId}; +use tracing::{info_span, instrument}; -use crate::server::ValidatedTransactions; +use crate::db::find_unvalidated_transactions; +use crate::{COMPONENT, ValidatorSigner}; // BLOCK VALIDATION ERROR // ================================================================================================ #[derive(thiserror::Error, Debug)] pub enum BlockValidationError { - #[error("transaction {0} in block {1} has not been validated")] - TransactionNotValidated(TransactionId, BlockNumber), + #[error("block contains unvalidated transactions {0:?}")] + UnvalidatedTransactions(Vec), #[error("failed to build block")] - BlockBuildingFailed(#[from] ProposedBlockError), + BlockBuildingFailed(#[source] ProposedBlockError), + #[error("failed to sign block: {0}")] + BlockSigningFailed(String), + #[error("failed to select transactions")] + DatabaseError(#[source] DatabaseError), } // BLOCK 
VALIDATION @@ -24,36 +28,37 @@ pub enum BlockValidationError { /// Validates a block by checking that all transactions in the proposed block have been processed by /// the validator in the past. -/// -/// Removes the validated transactions from the cache upon success. -pub async fn validate_block( +#[instrument(target = COMPONENT, skip_all, err)] +pub async fn validate_block( proposed_block: ProposedBlock, - signer: &S, - validated_transactions: Arc, + signer: &ValidatorSigner, + db: &Db, ) -> Result { - // Check that all transactions in the proposed block have been validated - let verify_span = info_span!("verify_transactions"); - for tx_header in proposed_block.transactions() { - let tx_id = tx_header.id(); - // TODO: LruCache is a poor abstraction since it locks many times. - if validated_transactions - .get(&tx_id) - .instrument(verify_span.clone()) - .await - .is_none() - { - return Err(BlockValidationError::TransactionNotValidated( - tx_id, - proposed_block.block_num(), - )); - } + // Search for any proposed transactions that have not previously been validated. + let proposed_tx_ids = + proposed_block.transactions().map(TransactionHeader::id).collect::>(); + let unvalidated_txs = db + .transact("find_unvalidated_transactions", move |conn| { + find_unvalidated_transactions(conn, &proposed_tx_ids) + }) + .await + .map_err(BlockValidationError::DatabaseError)?; + + // All proposed transactions must have been validated. + if !unvalidated_txs.is_empty() { + return Err(BlockValidationError::UnvalidatedTransactions(unvalidated_txs)); } // Build the block header. - let (header, _) = proposed_block.into_header_and_body()?; + let (header, _) = proposed_block + .into_header_and_body() + .map_err(BlockValidationError::BlockBuildingFailed)?; // Sign the header. 
- let signature = info_span!("sign_block").in_scope(|| signer.sign(&header)); + let signature = info_span!("sign_block") + .in_scope(async move || signer.sign(&header).await) + .await + .map_err(|err| BlockValidationError::BlockSigningFailed(err.to_string()))?; Ok(signature) } diff --git a/crates/validator/src/db/migrations.rs b/crates/validator/src/db/migrations.rs new file mode 100644 index 0000000000..240c29033b --- /dev/null +++ b/crates/validator/src/db/migrations.rs @@ -0,0 +1,25 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_db::DatabaseError; +use tracing::instrument; + +use crate::COMPONENT; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> std::result::Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + return Ok(()); + }; + tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/validator/src/db/migrations/2025062000000_setup/down.sql b/crates/validator/src/db/migrations/2025062000000_setup/down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/crates/validator/src/db/migrations/2025062000000_setup/up.sql b/crates/validator/src/db/migrations/2025062000000_setup/up.sql new file mode 100644 index 0000000000..85753d87f1 --- /dev/null +++ b/crates/validator/src/db/migrations/2025062000000_setup/up.sql @@ -0,0 +1,15 @@ +CREATE TABLE validated_transactions ( + id 
BLOB NOT NULL, + block_num INTEGER NOT NULL, + account_id BLOB NOT NULL, + account_delta BLOB, + input_notes BLOB, + output_notes BLOB, + initial_account_hash BLOB NOT NULL, + final_account_hash BLOB NOT NULL, + fee BLOB NOT NULL, + PRIMARY KEY (id) +) WITHOUT ROWID; + +CREATE INDEX idx_validated_transactions_account_id ON validated_transactions(account_id); +CREATE INDEX idx_validated_transactions_block_num ON validated_transactions(block_num); diff --git a/crates/validator/src/db/mod.rs b/crates/validator/src/db/mod.rs new file mode 100644 index 0000000000..4c8fe665be --- /dev/null +++ b/crates/validator/src/db/mod.rs @@ -0,0 +1,80 @@ +mod migrations; +mod models; +mod schema; + +use std::path::PathBuf; + +use diesel::SqliteConnection; +use diesel::dsl::exists; +use diesel::prelude::*; +use miden_node_db::{DatabaseError, Db}; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::Serializable; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::apply_migrations; +use crate::db::models::ValidatedTransactionRowInsert; +use crate::tx_validation::ValidatedTransaction; + +/// Open a connection to the DB and apply any pending migrations. +#[instrument(target = COMPONENT, skip_all)] +pub async fn load(database_filepath: PathBuf) -> Result { + let db = Db::new(&database_filepath)?; + tracing::info!( + target: COMPONENT, + sqlite= %database_filepath.display(), + "Connected to the database" + ); + + db.query("migrations", apply_migrations).await?; + Ok(db) +} + +/// Inserts a new validated transaction into the database. 
+#[instrument(target = COMPONENT, skip_all, fields(tx_id = %tx_info.tx_id()), err)] +pub(crate) fn insert_transaction( + conn: &mut SqliteConnection, + tx_info: &ValidatedTransaction, +) -> Result { + let row = ValidatedTransactionRowInsert::new(tx_info); + let count = diesel::insert_into(schema::validated_transactions::table) + .values(row) + .on_conflict_do_nothing() + .execute(conn)?; + Ok(count) +} + +/// Scans the database for transaction Ids that do not exist. +/// +/// If the resulting vector is empty, all supplied transaction ids have been validated in the past. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT EXISTS( +/// SELECT 1 +/// FROM validated_transactions +/// WHERE id = ? +/// ); +/// ``` +#[instrument(target = COMPONENT, skip(conn), err)] +pub(crate) fn find_unvalidated_transactions( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], +) -> Result, DatabaseError> { + let mut unvalidated_tx_ids = Vec::new(); + for tx_id in tx_ids { + // Check whether each transaction id exists in the database. + let exists = diesel::select(exists( + schema::validated_transactions::table + .filter(schema::validated_transactions::id.eq(tx_id.to_bytes())), + )) + .get_result::(conn)?; + // Record any transaction ids that do not exist. 
+ if !exists { + unvalidated_tx_ids.push(*tx_id); + } + } + Ok(unvalidated_tx_ids) +} diff --git a/crates/validator/src/db/models.rs b/crates/validator/src/db/models.rs new file mode 100644 index 0000000000..cb41197b77 --- /dev/null +++ b/crates/validator/src/db/models.rs @@ -0,0 +1,37 @@ +use diesel::prelude::*; +use miden_node_db::SqlTypeConvert; +use miden_tx::utils::Serializable; + +use crate::db::schema; +use crate::tx_validation::ValidatedTransaction; + +#[derive(Debug, Clone, PartialEq, Insertable)] +#[diesel(table_name = schema::validated_transactions)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ValidatedTransactionRowInsert { + pub id: Vec, + pub block_num: i64, + pub account_id: Vec, + pub account_delta: Vec, + pub input_notes: Vec, + pub output_notes: Vec, + pub initial_account_hash: Vec, + pub final_account_hash: Vec, + pub fee: Vec, +} + +impl ValidatedTransactionRowInsert { + pub fn new(tx: &ValidatedTransaction) -> Self { + Self { + id: tx.tx_id().to_bytes(), + block_num: tx.block_num().to_raw_sql(), + account_id: tx.account_id().to_bytes(), + account_delta: tx.account_delta().to_bytes(), + input_notes: tx.input_notes().to_bytes(), + output_notes: tx.output_notes().to_bytes(), + initial_account_hash: tx.initial_account_hash().to_bytes(), + final_account_hash: tx.final_account_hash().to_bytes(), + fee: tx.fee().amount().to_le_bytes().to_vec(), + } + } +} diff --git a/crates/validator/src/db/schema.rs b/crates/validator/src/db/schema.rs new file mode 100644 index 0000000000..380c68b9d4 --- /dev/null +++ b/crates/validator/src/db/schema.rs @@ -0,0 +1,13 @@ +diesel::table! 
{ + validated_transactions (id) { + id -> Binary, + block_num -> BigInt, + account_id -> Binary, + account_delta -> Binary, + input_notes -> Binary, + output_notes -> Binary, + initial_account_hash -> Binary, + final_account_hash -> Binary, + fee -> Binary, + } +} diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index a45112d275..185b9dfc6c 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,8 +1,11 @@ mod block_validation; +mod db; mod server; +mod signers; mod tx_validation; pub use server::Validator; +pub use signers::{KmsSigner, ValidatorSigner}; // CONSTANTS // ================================================================================================= diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 89d28d25de..ac4b56e51b 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -1,41 +1,31 @@ use std::net::SocketAddr; -use std::num::NonZeroUsize; +use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use anyhow::Context; +use miden_node_db::Db; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; use miden_node_utils::ErrorReport; -use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::block::{BlockSigner, ProposedBlock}; -use miden_protocol::transaction::{ - ProvenTransaction, - TransactionHeader, - TransactionId, - TransactionInputs, -}; +use miden_protocol::block::ProposedBlock; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; 
use tower_http::trace::TraceLayer; -use tracing::{Instrument, info_span}; +use tracing::{info_span, instrument}; -use crate::COMPONENT; use crate::block_validation::validate_block; +use crate::db::{insert_transaction, load}; use crate::tx_validation::validate_transaction; - -/// Number of transactions to keep in the validated transactions cache. -const NUM_VALIDATED_TRANSACTIONS: NonZeroUsize = NonZeroUsize::new(10000).unwrap(); - -/// A type alias for a LRU cache that stores validated transactions. -pub type ValidatedTransactions = LruCache; +use crate::{COMPONENT, ValidatorSigner}; // VALIDATOR // ================================================================================ @@ -43,7 +33,7 @@ pub type ValidatedTransactions = LruCache; /// The handle into running the gRPC validator server. /// /// Facilitates the running of the gRPC server which implements the validator API. -pub struct Validator { +pub struct Validator { /// The address of the validator component. pub address: SocketAddr, /// Server-side timeout for an individual gRPC request. @@ -52,10 +42,13 @@ pub struct Validator { pub grpc_timeout: Duration, /// The signer used to sign blocks. - pub signer: S, + pub signer: ValidatorSigner, + + /// The data directory for the validator component's database files. + pub data_directory: PathBuf, } -impl Validator { +impl Validator { /// Serves the validator RPC API. /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is @@ -63,6 +56,11 @@ impl Validator { pub async fn serve(self) -> anyhow::Result<()> { tracing::info!(target: COMPONENT, endpoint=?self.address, "Initializing server"); + // Initialize database connection. 
+ let db = load(self.data_directory.join("validator.sqlite3")) + .await + .context("failed to initialize validator database")?; + let listener = TcpListener::bind(self.address) .await .context("failed to bind to block producer address")?; @@ -86,7 +84,7 @@ impl Validator { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .timeout(self.grpc_timeout) - .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer))) + .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer, db))) .add_service(reflection_service) .add_service(reflection_service_alpha) .serve_with_incoming(TcpListenerStream::new(listener)) @@ -101,21 +99,19 @@ impl Validator { /// The underlying implementation of the gRPC validator server. /// /// Implements the gRPC API for the validator. -struct ValidatorServer { - signer: S, - validated_transactions: Arc, +struct ValidatorServer { + signer: ValidatorSigner, + db: Arc, } -impl ValidatorServer { - fn new(signer: S) -> Self { - let validated_transactions = - Arc::new(ValidatedTransactions::new(NUM_VALIDATED_TRANSACTIONS)); - Self { signer, validated_transactions } +impl ValidatorServer { + fn new(signer: ValidatorSigner, db: Db) -> Self { + Self { signer, db: db.into() } } } #[tonic::async_trait] -impl api_server::Api for ValidatorServer { +impl api_server::Api for ValidatorServer { /// Returns the status of the validator. async fn status( &self, @@ -128,6 +124,7 @@ impl api_server::Api for ValidatorServer } /// Receives a proven transaction, then validates and stores it. + #[instrument(target = COMPONENT, skip_all, err)] async fn submit_proven_transaction( &self, request: tonic::Request, @@ -150,17 +147,17 @@ impl api_server::Api for ValidatorServer tracing::Span::current().set_attribute("transaction.id", tx.id()); // Validate the transaction. 
- let validated_tx_header = validate_transaction(tx, inputs).await.map_err(|err| { + let tx_info = validate_transaction(tx, inputs).await.map_err(|err| { Status::invalid_argument(err.as_report_context("Invalid transaction")) })?; - // Register the validated transaction. - let tx_id = validated_tx_header.id(); - self.validated_transactions - .put(tx_id, validated_tx_header) - .instrument(info_span!("validated_txs.insert")) - .await; - + // Store the validated transaction. + self.db + .transact("insert_transaction", move |conn| insert_transaction(conn, &tx_info)) + .await + .map_err(|err| { + Status::internal(err.as_report_context("Failed to insert transaction")) + })?; Ok(tonic::Response::new(())) } @@ -181,11 +178,12 @@ impl api_server::Api for ValidatorServer // Validate the block. let signature = - validate_block(proposed_block, &self.signer, self.validated_transactions.clone()) - .await - .map_err(|err| { - tonic::Status::invalid_argument(format!("Failed to validate block: {err}",)) - })?; + validate_block(proposed_block, &self.signer, &self.db).await.map_err(|err| { + tonic::Status::invalid_argument(format!( + "Failed to validate block: {}", + err.as_report() + )) + })?; // Send the signature. 
info_span!("serialize").in_scope(|| { diff --git a/crates/validator/src/signers/kms.rs b/crates/validator/src/signers/kms.rs new file mode 100644 index 0000000000..d6bd285679 --- /dev/null +++ b/crates/validator/src/signers/kms.rs @@ -0,0 +1,122 @@ +use aws_sdk_kms::error::SdkError; +use aws_sdk_kms::operation::sign::SignError; +use aws_sdk_kms::types::SigningAlgorithmSpec; +use miden_node_utils::signer::BlockSigner; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; +use miden_protocol::crypto::hash::keccak::Keccak256; +use miden_tx::utils::{DeserializationError, Serializable}; + +// KMS SIGNER ERROR +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum KmsSignerError { + /// The KMS backend errored out. + #[error("KMS service failure")] + KmsServiceError(#[source] Box>), + /// The KMS backend did not error but returned an empty signature. + #[error("KMS request returned an empty result")] + EmptyBlob, + /// The KMS backend returned a signature with an invalid format. + #[error("invalid signature format")] + SignatureFormatError(#[source] DeserializationError), + /// The KMS backend returned a signature that was not able to be verified. + #[error("invalid signature")] + InvalidSignature, +} + +// KMS SIGNER +// ================================================================================================ + +/// Block signer that uses AWS KMS to create signatures. +pub struct KmsSigner { + key_id: String, + pub_key: PublicKey, + client: aws_sdk_kms::Client, +} + +impl KmsSigner { + /// Constructs a new KMS signer and retrieves the corresponding public key from the AWS backend. + /// + /// The supplied `key_id` must be a valid AWS KMS key ID in the AWS region corresponding to the + /// typical `AWS_REGION` env var. 
+ /// + /// A policy statement such as the following is required to allow a process on an EC2 instance + /// to use this signer: + /// ```json + /// { + /// "Sid": "AllowEc2RoleUseOfKey", + /// "Effect": "Allow", + /// "Principal": { + /// "AWS": "arn:aws:iam:::role/" + /// }, + /// "Action": [ + /// "kms:Sign", + /// "kms:Verify", + /// "kms:DescribeKey" + /// "kms:GetPublicKey" + /// ], + /// "Resource": "*" + /// }, + /// ``` + pub async fn new(key_id: impl Into) -> anyhow::Result { + let version = aws_config::BehaviorVersion::v2026_01_12(); + let config = aws_config::load_defaults(version).await; + let client = aws_sdk_kms::Client::new(&config); + let key_id = key_id.into(); + + // Retrieve DER-encoded SPKI. + let pub_key_output = client.get_public_key().key_id(key_id.clone()).send().await?; + let spki_der = pub_key_output.public_key().ok_or(KmsSignerError::EmptyBlob)?.as_ref(); + + // Decode the compressed SPKI as a Miden public key. + let pub_key = PublicKey::from_der(spki_der)?; + Ok(Self { key_id, pub_key, client }) + } +} + +impl BlockSigner for KmsSigner { + type Error = KmsSignerError; + + async fn sign(&self, header: &BlockHeader) -> Result { + // The Validator produces Ethereum-style ECDSA (secp256k1) signatures over Keccak-256 + // digests. AWS KMS does not support SHA-3 hashing for ECDSA keys + // (ECC_SECG_P256K1 being the corresponding AWS key-spec), so we pre-hash the + // message and pass MessageType::Digest. KMS signs the provided 32-byte digest + // verbatim. + let msg = header.commitment().to_bytes(); + let digest = Keccak256::hash(&msg); + + // Request signature from KMS backend. + let sign_output = self + .client + .sign() + .key_id(&self.key_id) + .signing_algorithm(SigningAlgorithmSpec::EcdsaSha256) + .message_type(aws_sdk_kms::types::MessageType::Digest) + .message(digest.to_bytes().into()) + .send() + .await + .map_err(Box::from) + .map_err(KmsSignerError::KmsServiceError)?; + + // Decode DER-encoded signature. 
+ let sig_der = sign_output.signature().ok_or(KmsSignerError::EmptyBlob)?; + // Recovery id is not used by verify(pk), so 0 is fine. + let recovery_id = 0; + let sig = Signature::from_der(sig_der.as_ref(), recovery_id) + .map_err(KmsSignerError::SignatureFormatError)?; + + // Check the returned signature. + if sig.verify(header.commitment(), &self.pub_key) { + Ok(sig) + } else { + Err(KmsSignerError::InvalidSignature) + } + } + + fn public_key(&self) -> PublicKey { + self.pub_key.clone() + } +} diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs new file mode 100644 index 0000000000..21bbeaa7ae --- /dev/null +++ b/crates/validator/src/signers/mod.rs @@ -0,0 +1,44 @@ +mod kms; +pub use kms::KmsSigner; +use miden_node_utils::signer::BlockSigner; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{SecretKey, Signature}; + +// VALIDATOR SIGNER +// ================================================================================================= + +/// Signer that the Validator uses to sign blocks. +pub enum ValidatorSigner { + Kms(KmsSigner), + Local(SecretKey), +} + +impl ValidatorSigner { + /// Constructs a signer which uses an AWS KMS key for signing. + /// + /// See [`KmsSigner`] for details as to env var configuration and AWS IAM policies + /// required to use this functionality. + pub async fn new_kms(key_id: impl Into) -> anyhow::Result { + let kms_signer = KmsSigner::new(key_id).await?; + Ok(Self::Kms(kms_signer)) + } + + /// Constructs a signer which uses a local secret key for signing. + pub fn new_local(secret_key: SecretKey) -> Self { + Self::Local(secret_key) + } + + /// Signs a block header using the configured signer. 
+ pub async fn sign(&self, header: &BlockHeader) -> anyhow::Result { + match self { + Self::Kms(signer) => { + let sig = signer.sign(header).await?; + Ok(sig) + }, + Self::Local(signer) => { + let sig = ::sign(signer, header).await?; + Ok(sig) + }, + } + } +} diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs index ebd382e44a..ac143ef3be 100644 --- a/crates/validator/src/tx_validation/data_store.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -3,7 +3,7 @@ use std::collections::BTreeSet; use miden_protocol::Word; -use miden_protocol::account::{AccountId, PartialAccount, StorageMapWitness}; +use miden_protocol::account::{AccountId, PartialAccount, StorageMapKey, StorageMapWitness}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteScript; @@ -83,7 +83,7 @@ impl DataStore for TransactionInputsDataStore { &self, _account_id: AccountId, _map_root: Word, - _map_key: Word, + _map_key: StorageMapKey, ) -> impl FutureMaybeSend> { async move { unimplemented!( diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs index 20d610acaa..f2d1250a20 100644 --- a/crates/validator/src/tx_validation/mod.rs +++ b/crates/validator/src/tx_validation/mod.rs @@ -1,11 +1,15 @@ mod data_store; +mod validated_tx; pub use data_store::TransactionInputsDataStore; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs}; use miden_tx::auth::UnreachableAuth; use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier}; -use tracing::{Instrument, info_span}; +use tracing::{Instrument, info_span, instrument}; +pub use validated_tx::ValidatedTransaction; + +use crate::COMPONENT; // TRANSACTION VALIDATION ERROR // 
================================================================================================ @@ -30,10 +34,11 @@ pub enum TransactionValidationError { /// provided proven transaction. /// /// Returns the header of the executed transaction if successful. +#[instrument(target = COMPONENT, skip_all, err)] pub async fn validate_transaction( proven_tx: ProvenTransaction, tx_inputs: TransactionInputs, -) -> Result { +) -> Result { // First, verify the transaction proof info_span!("verify").in_scope(|| { let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); @@ -56,7 +61,7 @@ pub async fn validate_transaction( let executed_tx_header: TransactionHeader = (&executed_tx).into(); let proven_tx_header: TransactionHeader = (&proven_tx).into(); if executed_tx_header == proven_tx_header { - Ok(executed_tx_header) + Ok(ValidatedTransaction::new(executed_tx)) } else { Err(TransactionValidationError::Mismatch { proven_tx_header: proven_tx_header.into(), diff --git a/crates/validator/src/tx_validation/validated_tx.rs b/crates/validator/src/tx_validation/validated_tx.rs new file mode 100644 index 0000000000..0233c7e8f3 --- /dev/null +++ b/crates/validator/src/tx_validation/validated_tx.rs @@ -0,0 +1,70 @@ +use miden_protocol::Word; +use miden_protocol::account::{AccountDelta, AccountId}; +use miden_protocol::asset::FungibleAsset; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::{ + ExecutedTransaction, + InputNote, + InputNotes, + OutputNotes, + TransactionId, +}; + +/// Re-executed and validated transaction that the Validator, or some ad-hoc +/// auditing procedure, might need to analyze. +/// +/// Constructed from an [`ExecutedTransaction`] that the Validator would have created while +/// re-executing and validating a [`miden_protocol::transaction::ProvenTransaction`]. +pub struct ValidatedTransaction(ExecutedTransaction); + +impl ValidatedTransaction { + /// Creates a new instance of [`ValidatedTransaction`]. 
+ pub fn new(tx: ExecutedTransaction) -> Self { + Self(tx) + } + + /// Returns ID of the transaction. + pub fn tx_id(&self) -> TransactionId { + self.0.id() + } + + /// Returns the block number in which the transaction was executed. + pub fn block_num(&self) -> BlockNumber { + self.0.block_header().block_num() + } + + /// Returns ID of the account against which this transaction was executed. + pub fn account_id(&self) -> AccountId { + self.0.account_id() + } + + /// Returns a description of changes between the initial and final account states. + pub fn account_delta(&self) -> &AccountDelta { + self.0.account_delta() + } + + /// Returns the notes consumed in this transaction. + pub fn input_notes(&self) -> &InputNotes { + self.0.input_notes() + } + + /// Returns the notes created in this transaction. + pub fn output_notes(&self) -> &OutputNotes { + self.0.output_notes() + } + + /// Returns the commitment of the initial account state. + pub fn initial_account_hash(&self) -> Word { + self.0.initial_account().initial_commitment() + } + + /// Returns the commitment of the final account state. + pub fn final_account_hash(&self) -> Word { + self.0.final_account().to_commitment() + } + + /// Returns the fee of the transaction. + pub fn fee(&self) -> FungibleAsset { + self.0.fee() + } +} diff --git a/docs/external/src/index.md b/docs/external/src/index.md index b53f7a753b..b4d1f0afef 100644 --- a/docs/external/src/index.md +++ b/docs/external/src/index.md @@ -20,6 +20,6 @@ interface for users, dApps, wallets and other clients to submit transactions and ## Feedback Please report any issues, ask questions or leave feedback in the node repository -[here](https://github.com/0xMiden/miden-node/issues/new/choose). +[here](https://github.com/0xMiden/node/issues/new/choose). 
This includes outdated, misleading, incorrect or just plain confusing information :) diff --git a/docs/external/src/operator/index.md b/docs/external/src/operator/index.md index 72dc0992d0..f362151a8c 100644 --- a/docs/external/src/operator/index.md +++ b/docs/external/src/operator/index.md @@ -4,4 +4,4 @@ Welcome to the `Miden` node operator guide which should cover everything you nee Miden node. You can report any issues, ask questions or leave feedback at our project repo -[here](https://github.com/0xMiden/miden-node/issues/new/choose). +[here](https://github.com/0xMiden/node/issues/new/choose). diff --git a/docs/external/src/operator/installation.md b/docs/external/src/operator/installation.md index 1f27c639d0..7af6b84d6e 100644 --- a/docs/external/src/operator/installation.md +++ b/docs/external/src/operator/installation.md @@ -8,7 +8,7 @@ We provide Debian packages for official releases for the node software. Alternat ## Debian package -Official Debian packages are available under our [releases](https://github.com/0xMiden/miden-node/releases) page. +Official Debian packages are available under our [releases](https://github.com/0xMiden/node/releases) page. Both `amd64` and `arm64` packages are available. Note that the packages include a `systemd` service which is disabled by default. @@ -39,6 +39,18 @@ command ensures that all required libraries are installed. sudo apt install llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ``` +On macOS, ensure the Xcode Command Line Tools are installed: + +```sh +xcode-select --install +``` + +If you still see `'cstdint' file not found` errors after installing the Command Line Tools (common after a macOS upgrade), try setting the SDK root explicitly: + +```sh +export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" +``` + Install the latest node binary: ```sh @@ -57,13 +69,13 @@ this for advanced use only. 
The incantation is a little different as you'll be t ```sh # Install from a specific branch -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --branch +cargo install --locked --git https://github.com/0xMiden/node miden-node --branch # Install a specific tag -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --tag +cargo install --locked --git https://github.com/0xMiden/node miden-node --tag # Install a specific git revision -cargo install --locked --git https://github.com/0xMiden/miden-node miden-node --rev +cargo install --locked --git https://github.com/0xMiden/node miden-node --rev ``` More information on the various `cargo install` options can be found diff --git a/docs/external/src/operator/usage.md b/docs/external/src/operator/usage.md index fa48617231..32dac5c114 100644 --- a/docs/external/src/operator/usage.md +++ b/docs/external/src/operator/usage.md @@ -50,8 +50,8 @@ miden-node bundled bootstrap \ --genesis-config-file genesis.toml ``` -The genesis configuration file should contain fee parameters, the native faucet, optionally other -fungible faucets, and also optionally, wallet definitions with assets, for example: +The genesis configuration file should contain fee parameters, optionally a custom native faucet, +optionally other fungible faucets, and also optionally, wallet definitions with assets, for example: ```toml # The UNIX timestamp of the genesis block. It will influence the hash of the genesis block. @@ -59,11 +59,13 @@ timestamp = 1717344256 # Defines the format of the block protocol to use for the genesis block. version = 1 -# The native faucet to use for fees. -[native_faucet] -symbol = "MIDEN" -decimals = 6 -max_supply = 100_000_000_000_000_000 +# The native faucet defaults to a MIDEN token (symbol="MIDEN", decimals=6, +# max_supply=100_000_000_000_000_000). 
To override it with a pre-built account +# file, specify the path: +# +# native_faucet = "path/to/faucet.mac" +# +# The path is relative to this configuration file. # The fee parameters to use for the genesis block. [fee_parameters] @@ -95,6 +97,17 @@ storage_mode = "private" # has_updatable_code = false # default value ``` +To include pre-built accounts (e.g. bridge or wrapped-asset faucets) in the genesis block, use +`[[account]]` entries with paths to `.mac` files: + +```toml +[[account]] +path = "bridge.mac" + +[[account]] +path = "eth_faucet.mac" +``` + ## Operation Start the node with the desired public gRPC server address. @@ -129,4 +142,4 @@ source profile.env && miden-node <...> This works well on Linux and MacOS, but Windows requires some additional scripting unfortunately. -See the `.env` files in each of the binary crates' [directories](https://github.com/0xMiden/miden-node/tree/next/bin) for a list of all available environment variables. +See the `.env` files in each of the binary crates' [directories](https://github.com/0xMiden/node/tree/next/bin) for a list of all available environment variables. diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index b26e881313..7e4598d8a4 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -7,7 +7,7 @@ sidebar_position: 1 This is a reference of the Node's public RPC interface. It consists of a gRPC API which may be used to submit transactions and query the state of the blockchain. -The gRPC service definition can be found in the Miden node's `proto` [directory](https://github.com/0xMiden/miden-node/tree/main/proto) in the `rpc.proto` file. +The gRPC service definition can be found in the Miden node's `proto` [directory](https://github.com/0xMiden/node/tree/main/proto) in the `rpc.proto` file. 
@@ -22,8 +22,8 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) - [Status](#status) @@ -107,6 +107,19 @@ The witness proves the account's state commitment in the account tree. If detail If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. +#### Error Codes + +When the request fails, detailed error information is provided through gRPC status details. The following error codes may be returned: + +| Error Code | Value | gRPC Status | Description | +|---------------------------|-------|--------------------|------------------------------------------------------| +| `INTERNAL_ERROR` | 0 | `INTERNAL` | Internal server error occurred | +| `DESERIALIZATION_FAILED` | 1 | `INVALID_ARGUMENT` | Request could not be deserialized | +| `ACCOUNT_NOT_FOUND` | 2 | `INVALID_ARGUMENT` | Account not found at the requested block | +| `ACCOUNT_NOT_PUBLIC` | 3 | `INVALID_ARGUMENT` | Account details requested for a non-public account | +| `UNKNOWN_BLOCK` | 4 | `INVALID_ARGUMENT` | Requested block number is unknown | +| `BLOCK_PRUNED` | 5 | `INVALID_ARGUMENT` | Requested block has been pruned | + ### GetBlockByNumber Request the raw data for a specific block. 
@@ -128,7 +141,9 @@ This endpoint allows clients to discover the maximum number of items that can be "endpoints": { "CheckNullifiers": { "parameters": { "nullifier": 1000 } }, "SyncNullifiers": { "parameters": { "nullifier": 1000 } }, - "SyncState": { "parameters": { "account_id": 1000, "note_tag": 1000 } }, + "SyncTransactions": { "parameters": { "account_id": 1000 } }, + "SyncAccountVault": { "parameters": { "account_id": 1000 } }, + "SyncAccountStorageMaps": { "parameters": { "account_id": 1000 } }, "SyncNotes": { "parameters": { "note_tag": 1000 } }, "GetNotesById": { "parameters": { "note_id": 100 } } } @@ -194,18 +209,6 @@ A basic note sync can be implemented by repeatedly requesting the previous respo **Limits:** `note_tag` (1000) -### SyncState - -Iteratively sync data for specific notes and accounts. - -This request returns the next block containing data of interest. Client is expected to repeat these requests in a loop until the response reaches the head of the chain, at which point the data is fully synced. - -Each update response also contains info about new notes, accounts etc. created. It also returns Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -The low part of note tags are redacted to preserve some degree of privacy. Returned data therefore contains additional notes which should be filtered out by the client. - -**Limits:** `account_id` (1000), `note_tag` (1000) - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. @@ -214,6 +217,12 @@ Caller specifies the `account_id` of the public account and the block range (`bl This endpoint enables clients to maintain an updated view of account storage. 
+### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range, but at most to (including) the chain tip. + ### SyncTransactions Returns transaction records for specific accounts within a block range. diff --git a/docs/internal/book.toml b/docs/internal/book.toml index 6a0ac5db79..3c1b2892ac 100644 --- a/docs/internal/book.toml +++ b/docs/internal/book.toml @@ -6,7 +6,7 @@ multilingual = false title = "The Miden Node Developer Guide" [output.html] -git-repository-url = "https://github.com/0xMiden/miden-node" +git-repository-url = "https://github.com/0xMiden/node" [preprocessor.katex] after = ["links"] diff --git a/docs/internal/src/index.md b/docs/internal/src/index.md index 478f22e8bc..00f85898a3 100644 --- a/docs/internal/src/index.md +++ b/docs/internal/src/index.md @@ -14,6 +14,6 @@ It is also a good idea to familiarise yourself with the [operator manual](https: Living documents go stale - the code is the final arbitrator of truth. If you encounter any outdated, incorrect or misleading information, please -[open an issue](https://github.com/0xMiden/miden-node/issues/new/choose). +[open an issue](https://github.com/0xMiden/node/issues/new/choose). 
diff --git a/packaging/node/miden-validator.service b/packaging/node/miden-validator.service new file mode 100644 index 0000000000..7b6c5de874 --- /dev/null +++ b/packaging/node/miden-validator.service @@ -0,0 +1,16 @@ +[Unit] +Description=Miden validator +Wants=network-online.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=exec +Environment="OTEL_SERVICE_NAME=miden-validator" +EnvironmentFile=/lib/systemd/system/miden-validator.env +ExecStart=/usr/bin/miden-node validator start +WorkingDirectory=/opt/miden-validator +User=miden-validator +RestartSec=5 +Restart=always diff --git a/packaging/node/postinst b/packaging/node/postinst index 8967f9e54e..036b2d112a 100644 --- a/packaging/node/postinst +++ b/packaging/node/postinst @@ -2,25 +2,28 @@ # # This is a postinstallation script so the service can be configured and started when requested. -# user is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-node +for svc in miden-node miden-validator; do + # user is expected by the systemd service file and `/opt/` is its working directory, + sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent "$svc" -# Working folder. -if [ -d "/opt/miden-node" ] -then - echo "Directory /opt/miden-node exists." -else - mkdir -p /opt/miden-node -fi -sudo chown -R miden-node /opt/miden-node + # Working folder. + if [ -d "/opt/$svc" ] + then + echo "Directory /opt/$svc exists." + else + mkdir -p "/opt/$svc" + fi + sudo chown -R "$svc" "/opt/$svc" -# Configuration folder -if [ -d "/etc/opt/miden-node" ] -then - echo "Directory /etc/opt/miden-node exists." -else - mkdir -p /etc/opt/miden-node -fi -sudo chown -R miden-node /etc/opt/miden-node + # Configuration folder + if [ -d "/etc/opt/$svc" ] + then + echo "Directory /etc/opt/$svc exists." 
+ else + mkdir -p "/etc/opt/$svc" + fi + sudo chown -R "$svc" "/etc/opt/$svc" + +done sudo systemctl daemon-reload diff --git a/packaging/node/postrm b/packaging/node/postrm index 893a535881..86a9846a25 100644 --- a/packaging/node/postrm +++ b/packaging/node/postrm @@ -3,7 +3,10 @@ ############### # Remove miden-node installs ############## -sudo rm -rf /lib/systemd/system/miden-node.service -sudo rm -rf /etc/opt/miden-node -sudo deluser miden-node +for svc in miden-node miden-validator; do + sudo rm -rf "/lib/systemd/system/$svc.service" + sudo rm -rf "/etc/opt/$svc" + sudo deluser "$svc" +done + sudo systemctl daemon-reload diff --git a/packaging/prover-proxy/miden-prover-proxy.service b/packaging/prover-proxy/miden-prover-proxy.service deleted file mode 100644 index 90a34c9d0f..0000000000 --- a/packaging/prover-proxy/miden-prover-proxy.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Miden delegated prover proxy -Wants=network-online.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=exec -Environment="OTEL_SERVICE_NAME=miden-prover-proxy" -EnvironmentFile=/lib/systemd/system/miden-prover-proxy.env -ExecStart=/usr/bin/miden-remote-prover start-proxy -WorkingDirectory=/opt/miden-prover-proxy -User=miden-prover-proxy -RestartSec=5 -Restart=always -LimitCORE=infinity diff --git a/packaging/prover-proxy/postinst b/packaging/prover-proxy/postinst deleted file mode 100644 index 275c8f2c7a..0000000000 --- a/packaging/prover-proxy/postinst +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# -# This is a postinstallation script so the service can be configured and started when requested. - -# User is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover-proxy - -# Working folder. -if [ -d "/opt/miden-prover-proxy" ] -then - echo "Directory /opt/miden-prover-proxy exists." 
-else - mkdir -p /opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /opt/miden-prover-proxy - -# Configuration folder -if [ -d "/etc/opt/miden-prover-proxy" ] -then - echo "Directory /etc/opt/miden-prover-proxy exists." -else - mkdir -p /etc/opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /etc/opt/miden-prover-proxy - -sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy diff --git a/packaging/prover-proxy/postrm b/packaging/prover-proxy/postrm deleted file mode 100644 index 001360b5c6..0000000000 --- a/packaging/prover-proxy/postrm +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -############### -# Remove miden-prover-proxy installs -############## -sudo rm -f /lib/systemd/system/miden-prover-proxy.* -sudo rm -rf /opt/miden-prover-proxy/ -sudo deluser miden-prover-proxy -sudo systemctl daemon-reload diff --git a/packaging/prover/miden-prover.service b/packaging/prover/miden-prover.service index a34eb26afb..4aafc09ca0 100644 --- a/packaging/prover/miden-prover.service +++ b/packaging/prover/miden-prover.service @@ -9,8 +9,7 @@ WantedBy=multi-user.target Type=exec Environment="OTEL_SERVICE_NAME=miden-prover" EnvironmentFile=/lib/systemd/system/miden-prover.env -ExecStart=/usr/bin/miden-remote-prover start-worker -WorkingDirectory=/opt/miden-prover +ExecStart=/usr/bin/miden-remote-prover User=miden-prover RestartSec=5 Restart=always diff --git a/packaging/prover/postinst b/packaging/prover/postinst index 9976ba33bf..2069a4cb69 100644 --- a/packaging/prover/postinst +++ b/packaging/prover/postinst @@ -2,27 +2,9 @@ # # This is a postinstallation script so the service can be configured and started when requested. 
-# User is expected by the systemd service file and `/opt/` is its working directory, +# User is expected by the systemd service file sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover -# Working folder. -if [ -d "/opt/miden-prover" ] -then - echo "Directory /opt/miden-prover exists." -else - mkdir -p /opt/miden-prover -fi -sudo chown -R miden-prover /opt/miden-prover - -# Configuration folder -if [ -d "/etc/opt/miden-prover" ] -then - echo "Directory /etc/opt/miden-prover exists." -else - mkdir -p /etc/opt/miden-prover -fi -sudo chown -R miden-prover /etc/opt/miden-prover - sudo systemctl daemon-reload sudo systemctl enable miden-prover sudo systemctl start miden-prover diff --git a/packaging/prover/postrm b/packaging/prover/postrm index d57bf2efcc..a633574388 100644 --- a/packaging/prover/postrm +++ b/packaging/prover/postrm @@ -3,7 +3,5 @@ ############### # Remove miden-prover installs ############## -sudo rm -f /lib/systemd/system/miden-prover.* -sudo rm -rf /opt/miden-prover/ sudo deluser miden-prover sudo systemctl daemon-reload diff --git a/proto/Cargo.toml b/proto/Cargo.toml index ba7d15f26c..ee79d7adc1 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -24,6 +24,7 @@ protox = { workspace = true } tonic-prost-build = { workspace = true } [build-dependencies] -fs-err = { workspace = true } -miette = { version = "7.6" } -protox = { workspace = true } +build-rs = { workspace = true } +fs-err = { workspace = true } +miette = { version = "7.6" } +protox = { workspace = true } diff --git a/proto/build.rs b/proto/build.rs index 3d4047e24d..7246ab4958 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -1,6 +1,3 @@ -use std::env; -use std::path::PathBuf; - use fs_err as fs; use miette::{Context, IntoDiagnostic}; use protox::prost::Message; @@ -28,55 +25,52 @@ const VALIDATOR_DESCRIPTOR: &str = "validator_file_descriptor.bin"; /// This is done only if `BUILD_PROTO` 
environment variable is set to `1` to avoid running the /// script on crates.io where repo-level .proto files are not available. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-changed=./proto"); - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); - - let out = - env::var("OUT_DIR").expect("env::OUT_DIR is always set in build.rs when used with cargo"); + build_rs::output::rerun_if_changed("./proto"); - let crate_root: PathBuf = env!("CARGO_MANIFEST_DIR").into(); - let proto_dir = crate_root.join("proto"); - let includes = &[proto_dir]; + let out_dir = build_rs::input::out_dir(); + let crate_root = build_rs::input::cargo_manifest_dir(); + let proto_src_dir = crate_root.join("proto"); + let includes = &[proto_src_dir]; let rpc_file_descriptor = protox::compile([RPC_PROTO], includes)?; - let rpc_path = PathBuf::from(&out).join(RPC_DESCRIPTOR); + let rpc_path = out_dir.join(RPC_DESCRIPTOR); fs::write(&rpc_path, rpc_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing rpc file descriptor")?; let remote_prover_file_descriptor = protox::compile([REMOTE_PROVER_PROTO], includes)?; - let remote_prover_path = PathBuf::from(&out).join(REMOTE_PROVER_DESCRIPTOR); + let remote_prover_path = out_dir.join(REMOTE_PROVER_DESCRIPTOR); fs::write(&remote_prover_path, remote_prover_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing remote prover file descriptor")?; let store_rpc_file_descriptor = protox::compile([STORE_RPC_PROTO], includes)?; - let store_rpc_path = PathBuf::from(&out).join(STORE_RPC_DESCRIPTOR); + let store_rpc_path = out_dir.join(STORE_RPC_DESCRIPTOR); fs::write(&store_rpc_path, store_rpc_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store rpc file descriptor")?; let store_ntx_builder_file_descriptor = protox::compile([STORE_NTX_BUILDER_PROTO], includes)?; - let store_ntx_builder_path = PathBuf::from(&out).join(STORE_NTX_BUILDER_DESCRIPTOR); + let store_ntx_builder_path = 
out_dir.join(STORE_NTX_BUILDER_DESCRIPTOR); fs::write(&store_ntx_builder_path, store_ntx_builder_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store ntx builder file descriptor")?; let store_block_producer_file_descriptor = protox::compile([STORE_BLOCK_PRODUCER_PROTO], includes)?; - let store_block_producer_path = PathBuf::from(&out).join(STORE_BLOCK_PRODUCER_DESCRIPTOR); + let store_block_producer_path = out_dir.join(STORE_BLOCK_PRODUCER_DESCRIPTOR); fs::write(&store_block_producer_path, store_block_producer_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing store block producer file descriptor")?; let block_producer_file_descriptor = protox::compile([BLOCK_PRODUCER_PROTO], includes)?; - let block_producer_path = PathBuf::from(&out).join(BLOCK_PRODUCER_DESCRIPTOR); + let block_producer_path = out_dir.join(BLOCK_PRODUCER_DESCRIPTOR); fs::write(&block_producer_path, block_producer_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing block producer file descriptor")?; let validator_file_descriptor = protox::compile([VALIDATOR_PROTO], includes)?; - let validator_path = PathBuf::from(&out).join(VALIDATOR_DESCRIPTOR); + let validator_path = out_dir.join(VALIDATOR_DESCRIPTOR); fs::write(&validator_path, validator_file_descriptor.encode_to_vec()) .into_diagnostic() .wrap_err("writing validator file descriptor")?; diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 001dc40986..3720991a04 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -45,7 +45,7 @@ service Rpc { rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (rpc.MaybeNoteScript) {} // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
// @@ -63,22 +63,8 @@ service Rpc { // tip of the chain. rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {} - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. - // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. - rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + // Returns chain MMR updates within a block range. + rpc SyncChainMmr(rpc.SyncChainMmrRequest) returns (rpc.SyncChainMmrResponse) {} // Returns account vault updates for specified account within a block range. rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} @@ -96,7 +82,7 @@ service Rpc { // Store API for the BlockProducer component service BlockProducer { // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + rpc ApplyBlock(ApplyBlockRequest) returns (google.protobuf.Empty) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. 
@@ -112,6 +98,18 @@ service BlockProducer { rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} } +// APPLY BLOCK REQUEST +// ================================================================================================ + +// Applies a block to the state. +message ApplyBlockRequest { + // Ordered batches encoded using [winter_utils::Serializable] implementation for + // [miden_objects::batch::OrderedBatches]. + bytes ordered_batches = 1; + // Block signed by the Validator. + blockchain.SignedBlock block = 2; +} + // GET BLOCK INPUTS // ================================================================================================ @@ -271,7 +269,7 @@ service NtxBuilder { rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (rpc.MaybeNoteScript) {} // Returns vault asset witnesses for the specified account. rpc GetVaultAssetWitnesses(VaultAssetWitnessesRequest) returns (VaultAssetWitnessesResponse) {} diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index f521fc1c5f..1a218539ee 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -17,6 +17,13 @@ service Api { // Returns the status info of the node. rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. 
+ rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier // // Each proof demonstrates either: @@ -44,7 +51,10 @@ service Api { rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {} + + // TRANSACTION SUBMISSION ENDPOINTS + // -------------------------------------------------------------------------------------------- // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} @@ -63,54 +73,38 @@ service Api { // Returns the node's current block height. rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} + // STATE SYNCHRONIZATION ENDPOINTS + // -------------------------------------------------------------------------------------------- - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} + // Returns transactions records for specific accounts within a block range. + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. + // Returns info which can be used by the client to sync up to the tip of chain for the notes + // they are interested in. 
// - // Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. + // Client specifies the `note_tags` they are interested in, and the block height from which to + // search for new for matching notes for. The request will then return the next block containing + // any note matching the provided tags. // // The response includes each note's metadata and inclusion proof. // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. + // A basic note sync can be implemented by repeatedly requesting the previous response's block + // until reaching the tip of the chain. rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - // Returns info which can be used by the client to sync up to the latest state of the chain - // for the objects (accounts and notes) the client is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. Client is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the client is fully synchronized with the chain. - // - // Each update response also contains info about new notes, accounts etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. + // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // - // For preserving some degree of privacy, note tags contain only high - // part of hashes. Thus, returned data contains excessive notes, client can make - // additional filtering of that data on its side. 
- rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} + // Note that only 16-bit prefixes are supported at this time. + rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} // Returns storage map updates for specified account and storage slots within a block range. rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - - // Returns the query parameter limits configured for RPC methods. - // - // These define the maximum number of each parameter a method will accept. - // Exceeding the limit will result in the request being rejected and you should instead send - // multiple smaller requests. - rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns MMR delta needed to synchronize the chain MMR within the requested block range. + rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } // RPC STATUS @@ -486,51 +480,26 @@ message SyncNotesResponse { repeated note.NoteSyncRecord notes = 4; } -// SYNC STATE +// SYNC CHAIN MMR // ================================================================================================ -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. 
The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. +// Chain MMR synchronization request. +message SyncChainMmrRequest { + // Block range from which to synchronize the chain MMR. // - // An account commitment will be included if-and-only-if it is the latest update. Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; + // The response will contain MMR delta starting after `block_range.block_from` up to + // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + // block already present in the caller's MMR so the delta begins at the next block. + BlockRange block_range = 1; } -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. - fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. 
- repeated note.NoteSyncRecord notes = 7; +// Represents the result of syncing chain MMR. +message SyncChainMmrResponse { + // For which block range the MMR delta is returned. + BlockRange block_range = 1; + // Data needed to update the partial MMR from `request.block_range.block_from + 1` to + // `response.block_range.block_to` or the chain tip. + primitives.MmrDelta mmr_delta = 2; } // SYNC ACCOUNT STORAGE MAP @@ -650,7 +619,7 @@ message TransactionRecord { // Represents the query parameter limits for RPC endpoints. message RpcLimits { // Maps RPC endpoint names to their parameter limits. - // Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + // Key: endpoint name (e.g., "CheckNullifiers") // Value: map of parameter names to their limit values map endpoints = 1; } diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 6f53cd4f33..43828d4dc9 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -7,11 +7,11 @@ import "types/primitives.proto"; // BLOCK // ================================================================================================ -// Represents a block. -message Block { - // Block data encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::block::Block]. - bytes block = 1; +// Represents a signed block. +message SignedBlock { + BlockHeader header = 1; + BlockBody body = 2; + BlockSignature signature = 3; } // Represents a proposed block. diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index ac125daa06..0824a03073 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -7,6 +7,16 @@ import "types/account.proto"; // NOTES // ================================================================================================ +// The type of a note. +enum NoteType { + // Unspecified note type (default value, should not be used). 
+ NOTE_TYPE_UNSPECIFIED = 0; + // Public note - details are visible on-chain. + NOTE_TYPE_PUBLIC = 1; + // Private note - details are not visible on-chain. + NOTE_TYPE_PRIVATE = 2; +} + // Represents a note's ID. message NoteId { // A unique identifier of the note which is a 32-byte commitment to the underlying note data. @@ -24,8 +34,8 @@ message NoteMetadata { // The account which sent the note. account.AccountId sender = 1; - // The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - uint32 note_type = 2; + // The type of the note. + NoteType note_type = 2; // A value which can be used by the recipient(s) to identify notes intended for them. // @@ -110,9 +120,9 @@ message NoteSyncRecord { primitives.SparseMerklePath inclusion_path = 4; } -// Represents a note root. -message NoteRoot { - // The root of the note. +// Represents a commitment to a note script. +message NoteScriptRoot { + // Root of the note script. primitives.Digest root = 1; } @@ -120,6 +130,6 @@ message NoteRoot { message NoteScript { // Entrypoint of the script. uint32 entrypoint = 1; - // Mast of the script. + // MAST of the script. bytes mast = 2; } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6744e56e15..d9a424cef9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.90" +channel = "1.91" components = ["clippy", "rust-src", "rustfmt"] profile = "minimal" targets = ["wasm32-unknown-unknown"] diff --git a/scripts/check-features.sh b/scripts/check-features.sh index 0b128a1855..f51e5c71f8 100755 --- a/scripts/check-features.sh +++ b/scripts/check-features.sh @@ -7,9 +7,8 @@ set -euo pipefail echo "Checking all feature combinations with cargo-hack..." -# Set environment variables to treat warnings as errors and build protos +# Set environment variables to treat warnings as errors export RUSTFLAGS="-D warnings" -export BUILD_PROTO=1 # Run cargo-hack with comprehensive feature checking cargo hack check \