diff --git a/.github/workflows/opencode-pr.yml b/.github/workflows/opencode-pr.yml index fe190015..db6bdfb4 100644 --- a/.github/workflows/opencode-pr.yml +++ b/.github/workflows/opencode-pr.yml @@ -9,6 +9,7 @@ jobs: # Don't run on draft PRs; do run when they become ready_for_review. if: ${{ github.event.pull_request.draft == false }} runs-on: blacksmith-2vcpu-ubuntu-2404 + timeout-minutes: 10 permissions: id-token: write contents: write @@ -18,12 +19,59 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Configure git run: | git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" + - name: Fetch previous opencode reviews + id: previous-reviews + run: | + # Get PR number + PR_NUMBER="${{ github.event.pull_request.number }}" + + # Fetch review comments from opencode-agent + echo "Fetching previous automated reviews..." + + # Get all reviews + gh api /repos/${{ github.repository }}/pulls/${PR_NUMBER}/reviews \ + --jq '.[] | select(.user.login == "opencode-agent[bot]") | {body: .body, submitted_at: .submitted_at}' > /tmp/opencode_reviews.json 2>/dev/null || echo "[]" > /tmp/opencode_reviews.json + + # Get all PR review comments (inline comments) + gh api /repos/${{ github.repository }}/pulls/${PR_NUMBER}/comments \ + --jq '.[] | select(.user.login == "opencode-agent[bot]") | {body: .body, path: .path, line: .line, created_at: .created_at}' > /tmp/opencode_comments.json 2>/dev/null || echo "[]" > /tmp/opencode_comments.json + + # Format the previous reviews for the prompt + # Use a simpler approach without heredoc to avoid delimiter issues + REVIEW_CONTENT="## Previous Automated Reviews from opencode-agent:\n\n" + + # Process reviews + if [ -s /tmp/opencode_reviews.json ] && [ "$(cat /tmp/opencode_reviews.json)" != "[]" ]; then + while IFS= read -r review; do + if [ -n "$review" ] && [ "$review" != "null" ]; then + body=$(echo "$review" | jq -r '.body // empty') + 
date=$(echo "$review" | jq -r '.submitted_at // empty') + if [ -n "$body" ] && [ "$body" != "null" ]; then + REVIEW_CONTENT="${REVIEW_CONTENT}### Review from ${date}\n${body}\n\n---\n\n" + fi + fi + done < /tmp/opencode_reviews.json + else + REVIEW_CONTENT="${REVIEW_CONTENT}No previous automated reviews found.\n" + fi + + # Write to environment file using proper escaping + echo "PREVIOUS_REVIEWS<<EOF" >> $GITHUB_ENV + printf '%s\n' "$REVIEW_CONTENT" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + echo "Previous reviews fetched and formatted for context" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Install Nix uses: DeterminateSystems/nix-installer-action@v16 @@ -36,12 +84,111 @@ jobs: - name: Run opencode uses: anomalyco/opencode/github@latest env: - MINIMAX_API_KEY: ${{ secrets.MINIMAX_API_KEY }} + KIMI_API_KEY: ${{ secrets.KIMI_API_KEY }} with: - model: minimax-coding-plan/MiniMax-M2.1 + model: kimi-for-coding/k2p5 prompt: | - 1. If the PR tags or references any issues (e.g., "Fixes #123"), verify if the implementation fully satisfies the requirements of those issues. If no issues are tagged, proceed without mentioning it. - 2. If there are previous code reviews on this PR, verify if the feedback has been addressed. If there are no previous reviews, do not mention their absence. - 3. Analyze for code quality issues, potential bugs, and architectural improvements. - 4. Enforce SOLID principles. Provide a table breakdown scoring the PR on each of the 5 SOLID points (Single Responsibility, Open/Closed, Liskov Substitution, Interface Segregation, Dependency Inversion). For each point, provide a score (e.g., 0-10 or N/A) and specific, actionable suggestions on where and how to improve. + You are reviewing a pull request. + + ${{ env.PREVIOUS_REVIEWS }} + + --- + + **YOUR TASK:** Analyze the CURRENT code changes and previous reviews above, then output your review in the following STRICT STRUCTURE: + + **CRITICAL INSTRUCTIONS:** + 1. 
**CHECK PREVIOUS ISSUES FIRST:** Look at the "Previous Automated Reviews" section above. For each issue previously reported (Critical, High, Medium, Low), verify if it still exists in the current code. + 2. **ACKNOWLEDGE FIXES:** If a previously reported issue has been fixed, state "✅ **[FIXED]** Previous issue: [brief description]" in the appropriate section. + 3. **ONLY REPORT NEW/UNRESOLVED ISSUES:** Do NOT re-report issues that have already been fixed. Only report issues that are still present in the current code. + 4. **TRACK CHANGES:** If an issue was reported in a previous review but the code has changed, verify the new code and report the issue with updated file:line references if it still exists. + + --- + + ## 📋 Summary + First, check if the PR description mentions any linked issues (e.g., "Closes #123", "Fixes #456", "Resolves #789"). + + If linked issues are found: + - Mention the issue number(s) explicitly + - Verify the PR actually implements what the issue(s) requested + - State whether the implementation fully satisfies the issue requirements + + Then provide 2-3 sentences summarizing the PR purpose, scope, and overall quality. + + ## 🔴 Critical Issues (Must Fix - Blocks Merge) + **IMPORTANT:** Check previous reviews first. If critical issues were reported before, verify if they're fixed. If fixed, say "✅ All previously reported critical issues have been resolved." + + Only report NEW critical issues that could cause crashes, security vulnerabilities, data loss, or major bugs. + + For each issue, use this exact format: + ``` + **[CRITICAL]** `File:Line` - Issue Title + **Confidence:** High|Medium|Low (how sure you are this is a real problem) + **Description:** Clear explanation of the issue + **Impact:** What could go wrong if merged + **Suggested Fix:** Specific code changes needed + ``` + + ## ⚠️ High Priority Issues (Should Fix) + Same approach as Critical - check previous reviews first, acknowledge fixes, only report unresolved issues. 
+ + Same format as Critical, but with **[HIGH]** prefix. + + ## 💡 Medium Priority Issues (Nice to Fix) + Same approach - verify previous reports, acknowledge fixes, report only still-present issues. + + Same format, with **[MEDIUM]** prefix. + + ## ℹ️ Low Priority Suggestions (Optional) + Same approach. + + Same format, with **[LOW]** prefix. + + ## 📊 SOLID Principles Score + | Principle | Score | Notes | + |-----------|-------|-------| + | Single Responsibility | 0-10 | Brief justification | + | Open/Closed | 0-10 | Brief justification | + | Liskov Substitution | 0-10 | Brief justification | + | Interface Segregation | 0-10 | Brief justification | + | Dependency Inversion | 0-10 | Brief justification | + | **Average** | **X.X** | | + + ## 🎯 Final Assessment + + ### Overall Confidence Score: XX% + Rate your confidence in this PR being ready to merge (0-100%). + **How to interpret:** + - 0-30%: Major concerns, do not merge without significant rework + - 31-60%: Moderate concerns, several issues need addressing + - 61-80%: Minor concerns, mostly ready with some fixes + - 81-100%: High confidence, ready to merge or with trivial fixes + + ### Confidence Breakdown: + - **Code Quality:** XX% (how well-written is the code?) + - **Completeness:** XX% (does it fulfill requirements?) + - **Risk Level:** XX% (how risky is this change?) + - **Test Coverage:** XX% (are changes adequately tested?) + + ### Merge Readiness: + - [ ] All critical issues resolved + - [ ] SOLID average score >= 6.0 + - [ ] Overall confidence >= 60% + - [ ] No security concerns + - [ ] Tests present and passing (if applicable) + + ### Verdict: + **MERGE** | **MERGE WITH FIXES** | **DO NOT MERGE** + + One-sentence explanation of the verdict. + + --- + + **Review Guidelines:** + 1. **MOST IMPORTANT:** Always check previous reviews and verify if issues are fixed before reporting them again + 2. Acknowledge fixes explicitly with ✅ **[FIXED]** markers + 3. 
Check the PR description for linked issues ("Fixes #123", "Closes #456", etc.) and verify the implementation + 4. Be extremely specific with file paths and line numbers + 5. Confidence scores should reflect how certain you are - use "Low" when unsure + 6. If you have nothing meaningful to add to a section, write "None identified" instead of omitting it + 7. Always provide actionable fixes, never just complaints diff --git a/.github/workflows/opencode-triage.yml b/.github/workflows/opencode-triage.yml index da436c90..51b31f42 100644 --- a/.github/workflows/opencode-triage.yml +++ b/.github/workflows/opencode-triage.yml @@ -38,9 +38,9 @@ jobs: - uses: anomalyco/opencode/github@latest if: steps.check.outputs.result == 'true' env: - MINIMAX_API_KEY: ${{ secrets.MINIMAX_API_KEY }} + KIMI_API_KEY: ${{ secrets.KIMI_API_KEY }} with: - model: minimax-coding-plan/MiniMax-M2.1 + model: kimi-for-coding/k2p5 prompt: | Analyze this issue. You have access to the codebase context. **CRITICAL: Your only allowed action is to post a COMMENT on the issue. 
DO NOT create branches, pull requests, or attempt to modify the codebase.** diff --git a/.github/workflows/opencode.yml b/.github/workflows/opencode.yml index 921e95ec..90110ae8 100644 --- a/.github/workflows/opencode.yml +++ b/.github/workflows/opencode.yml @@ -40,6 +40,6 @@ jobs: - name: Run opencode uses: anomalyco/opencode/github@latest env: - MINIMAX_API_KEY: ${{ secrets.MINIMAX_API_KEY }} + KIMI_API_KEY: ${{ secrets.KIMI_API_KEY }} with: - model: minimax-coding-plan/MiniMax-M2.1 + model: kimi-for-coding/k2p5 diff --git a/.gitignore b/.gitignore index 0398fc41..4a1ce0be 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,8 @@ test_output.txt *.swp .DS_Store *.spv +!assets/shaders/vulkan/lpv_inject.comp.spv +!assets/shaders/vulkan/lpv_propagate.comp.spv wiki/ *.exr *.hdr diff --git a/assets/config/presets.json b/assets/config/presets.json index 75ddd7a9..6ac4450b 100644 --- a/assets/config/presets.json +++ b/assets/config/presets.json @@ -2,6 +2,7 @@ { "name": "LOW", "shadow_quality": 0, + "shadow_distance": 150.0, "shadow_pcf_samples": 4, "shadow_cascade_blend": false, "pbr_enabled": false, @@ -26,6 +27,7 @@ { "name": "MEDIUM", "shadow_quality": 1, + "shadow_distance": 250.0, "shadow_pcf_samples": 8, "shadow_cascade_blend": false, "pbr_enabled": true, @@ -50,6 +52,7 @@ { "name": "HIGH", "shadow_quality": 2, + "shadow_distance": 500.0, "shadow_pcf_samples": 12, "shadow_cascade_blend": true, "pbr_enabled": true, @@ -74,6 +77,7 @@ { "name": "ULTRA", "shadow_quality": 3, + "shadow_distance": 1000.0, "shadow_pcf_samples": 16, "shadow_cascade_blend": true, "pbr_enabled": true, diff --git a/assets/shaders/vulkan/g_pass.frag b/assets/shaders/vulkan/g_pass.frag index f14f09ea..cc4f0b01 100644 --- a/assets/shaders/vulkan/g_pass.frag +++ b/assets/shaders/vulkan/g_pass.frag @@ -31,6 +31,8 @@ layout(set = 0, binding = 0) uniform GlobalUniforms { vec4 pbr_params; vec4 volumetric_params; vec4 viewport_size; + vec4 lpv_params; + vec4 lpv_origin; } global; // 4x4 Bayer 
matrix for dithered LOD transitions diff --git a/assets/shaders/vulkan/lpv_inject.comp b/assets/shaders/vulkan/lpv_inject.comp new file mode 100644 index 00000000..fb1889ea --- /dev/null +++ b/assets/shaders/vulkan/lpv_inject.comp @@ -0,0 +1,71 @@ +#version 450 + +layout(local_size_x = 4, local_size_y = 4, local_size_z = 4) in; + +struct LightData { + vec4 pos_radius; + vec4 color; +}; + +// SH L1: 3 output images (R, G, B), each storing 4 SH coefficients (L0, L1x, L1y, L1z) +layout(set = 0, binding = 0, rgba32f) uniform writeonly image3D lpv_out_r; +layout(set = 0, binding = 1, rgba32f) uniform writeonly image3D lpv_out_g; +layout(set = 0, binding = 2, rgba32f) uniform writeonly image3D lpv_out_b; + +layout(set = 0, binding = 3) readonly buffer Lights { + LightData lights[]; +} light_buffer; + +layout(push_constant) uniform InjectPush { + vec4 grid_origin_cell; + vec4 grid_params; + uint light_count; +} push_data; + +// SH L1 basis functions (unnormalized for compact storage) +// Y_00 = 0.282095 (DC) +// Y_1m = 0.488603 * {x, y, z} (directional) +const float SH_C0 = 0.282095; +const float SH_C1 = 0.488603; + +void main() { + int gridSize = int(push_data.grid_params.x); + ivec3 cell = ivec3(gl_GlobalInvocationID.xyz); + if (any(greaterThanEqual(cell, ivec3(gridSize)))) { + return; + } + + vec3 world_pos = push_data.grid_origin_cell.xyz + vec3(cell) * push_data.grid_origin_cell.w + vec3(0.5 * push_data.grid_origin_cell.w); + + // Accumulate SH coefficients per color channel + vec4 sh_r = vec4(0.0); // (L0, L1x, L1y, L1z) for red + vec4 sh_g = vec4(0.0); // for green + vec4 sh_b = vec4(0.0); // for blue + + for (uint i = 0; i < push_data.light_count; i++) { + vec3 light_pos = light_buffer.lights[i].pos_radius.xyz; + float radius = max(light_buffer.lights[i].pos_radius.w, 0.001); + vec3 light_color = light_buffer.lights[i].color.rgb; + + vec3 diff = world_pos - light_pos; + float d = length(diff); + if (d < radius) { + float att = 1.0 - (d / radius); + att *= att; + + 
// Direction from light to cell (normalized), used for SH L1 directional encoding + vec3 dir = (d > 0.001) ? normalize(diff) : vec3(0.0, 1.0, 0.0); + + // SH L1 projection: project the incoming radiance along direction + vec4 sh_coeffs = vec4(SH_C0, SH_C1 * dir.x, SH_C1 * dir.y, SH_C1 * dir.z); + + sh_r += sh_coeffs * light_color.r * att; + sh_g += sh_coeffs * light_color.g * att; + sh_b += sh_coeffs * light_color.b * att; + } + } + + imageStore(lpv_out_r, cell, sh_r); + imageStore(lpv_out_g, cell, sh_g); + imageStore(lpv_out_b, cell, sh_b); +} diff --git a/assets/shaders/vulkan/lpv_inject.comp.spv b/assets/shaders/vulkan/lpv_inject.comp.spv new file mode 100644 index 00000000..0f5ce648 Binary files /dev/null and b/assets/shaders/vulkan/lpv_inject.comp.spv differ diff --git a/assets/shaders/vulkan/lpv_propagate.comp b/assets/shaders/vulkan/lpv_propagate.comp new file mode 100644 index 00000000..a607ee0f --- /dev/null +++ b/assets/shaders/vulkan/lpv_propagate.comp @@ -0,0 +1,117 @@ +#version 450 + +layout(local_size_x = 4, local_size_y = 4, local_size_z = 4) in; + +// Source SH grids (read) +layout(set = 0, binding = 0, rgba32f) uniform readonly image3D src_r; +layout(set = 0, binding = 1, rgba32f) uniform readonly image3D src_g; +layout(set = 0, binding = 2, rgba32f) uniform readonly image3D src_b; + +// Destination SH grids (write) +layout(set = 0, binding = 3, rgba32f) uniform writeonly image3D dst_r; +layout(set = 0, binding = 4, rgba32f) uniform writeonly image3D dst_g; +layout(set = 0, binding = 5, rgba32f) uniform writeonly image3D dst_b; + +// Occlusion grid +layout(set = 0, binding = 6) readonly buffer OcclusionGrid { + uint data[]; +} occlusion; + +layout(push_constant) uniform PropPush { + uint grid_size; + uint _pad0[3]; + vec4 propagation; +} push_data; + +uint flatIndex(ivec3 cell, int gridSize) { + return uint(cell.x) + uint(cell.y) * uint(gridSize) + uint(cell.z) * uint(gridSize) * uint(gridSize); +} + +// SH L1 constants +const float SH_C0 = 
0.282095; +const float SH_C1 = 0.488603; + +// Evaluate SH in a given direction to get the scalar irradiance contribution +float evaluateSH(vec4 sh, vec3 dir) { + return max(0.0, sh.x * SH_C0 + sh.y * SH_C1 * dir.x + sh.z * SH_C1 * dir.y + sh.w * SH_C1 * dir.z); +} + +// Project a scalar value in a given direction into SH coefficients +vec4 projectSH(float value, vec3 dir) { + return vec4(value * SH_C0, value * SH_C1 * dir.x, value * SH_C1 * dir.y, value * SH_C1 * dir.z); +} + +void main() { + int gridSize = int(push_data.grid_size); + ivec3 cell = ivec3(gl_GlobalInvocationID.xyz); + if (any(greaterThanEqual(cell, ivec3(gridSize)))) { + return; + } + + // If current cell is opaque, zero out + uint selfOcc = occlusion.data[flatIndex(cell, gridSize)]; + if (selfOcc != 0u) { + imageStore(dst_r, cell, vec4(0.0)); + imageStore(dst_g, cell, vec4(0.0)); + imageStore(dst_b, cell, vec4(0.0)); + return; + } + + // Center retention + float retention = push_data.propagation.y; + vec4 center_r = imageLoad(src_r, cell) * retention; + vec4 center_g = imageLoad(src_g, cell) * retention; + vec4 center_b = imageLoad(src_b, cell) * retention; + + vec4 accum_r = center_r; + vec4 accum_g = center_g; + vec4 accum_b = center_b; + + float f = push_data.propagation.x; + + // 6-connected neighbor propagation with SH directional transfer + // For each face, evaluate neighbor SH in the transfer direction and re-project + ivec3 offsets[6] = ivec3[6]( + ivec3(-1, 0, 0), ivec3(1, 0, 0), + ivec3(0, -1, 0), ivec3(0, 1, 0), + ivec3(0, 0, -1), ivec3(0, 0, 1) + ); + // Direction from neighbor to current cell (transfer direction) + vec3 dirs[6] = vec3[6]( + vec3( 1, 0, 0), vec3(-1, 0, 0), + vec3( 0, 1, 0), vec3( 0,-1, 0), + vec3( 0, 0, 1), vec3( 0, 0,-1) + ); + + for (int i = 0; i < 6; i++) { + ivec3 n = cell + offsets[i]; + if (any(lessThan(n, ivec3(0))) || any(greaterThanEqual(n, ivec3(gridSize)))) { + continue; + } + uint nOcc = occlusion.data[flatIndex(n, gridSize)]; + if (nOcc != 0u) { + 
continue; + } + + vec3 transferDir = dirs[i]; + + // Load neighbor SH coefficients + vec4 n_r = imageLoad(src_r, n); + vec4 n_g = imageLoad(src_g, n); + vec4 n_b = imageLoad(src_b, n); + + // Evaluate how much light the neighbor sends in the transfer direction + float eval_r = evaluateSH(n_r, transferDir); + float eval_g = evaluateSH(n_g, transferDir); + float eval_b = evaluateSH(n_b, transferDir); + + // Re-project into SH at current cell using the same direction + accum_r += projectSH(eval_r, transferDir) * f; + accum_g += projectSH(eval_g, transferDir) * f; + accum_b += projectSH(eval_b, transferDir) * f; + } + + imageStore(dst_r, cell, accum_r); + imageStore(dst_g, cell, accum_g); + imageStore(dst_b, cell, accum_b); +} diff --git a/assets/shaders/vulkan/lpv_propagate.comp.spv b/assets/shaders/vulkan/lpv_propagate.comp.spv new file mode 100644 index 00000000..fd724bc7 Binary files /dev/null and b/assets/shaders/vulkan/lpv_propagate.comp.spv differ diff --git a/assets/shaders/vulkan/post_process.frag b/assets/shaders/vulkan/post_process.frag index 67b9824a..56436add 100644 --- a/assets/shaders/vulkan/post_process.frag +++ b/assets/shaders/vulkan/post_process.frag @@ -5,10 +5,15 @@ layout(location = 0) out vec4 outColor; layout(set = 0, binding = 0) uniform sampler2D uHDRBuffer; layout(set = 0, binding = 2) uniform sampler2D uBloomTexture; +layout(set = 0, binding = 3) uniform sampler3D uColorLUT; layout(push_constant) uniform PostProcessParams { - float bloomEnabled; // 0.0 = disabled, 1.0 = enabled - float bloomIntensity; // Final bloom blend intensity + float bloomEnabled; // 0.0 = disabled, 1.0 = enabled + float bloomIntensity; // Final bloom blend intensity + float vignetteIntensity; // 0.0 = none, 1.0 = full vignette + float filmGrainIntensity; // 0.0 = none, 1.0 = heavy grain + float colorGradingEnabled; // 0.0 = disabled, 1.0 = enabled + float colorGradingIntensity; // LUT blend intensity (0.0 = original, 1.0 = full LUT) } postParams; layout(set = 0, 
binding = 1) uniform GlobalUniforms { @@ -107,6 +112,58 @@ vec3 ACESFilm(vec3 x) { return clamp((x * (a * x + b)) / (x * (c * x + d) + e), 0.0, 1.0); } +// Vignette effect - darkens edges of the screen +vec3 applyVignette(vec3 color, vec2 uv, float intensity) { + if (intensity <= 0.0) return color; + + // Convert UV from [0,1] to [-1,1] range, centered at (0.5, 0.5) + vec2 centered = uv * 2.0 - 1.0; + + // Calculate distance from center (circular vignette) + float dist = length(centered); + + // Smooth vignette falloff + float vignette = smoothstep(1.0, 0.4, dist * (1.0 + intensity)); + + // Apply vignette - darker at edges + return color * mix(0.3, 1.0, vignette * (1.0 - intensity * 0.5) + intensity * 0.5); +} + +// Pseudo-random function for film grain +float random(vec2 uv) { + return fract(sin(dot(uv, vec2(12.9898, 78.233))) * 43758.5453); +} + +// LUT-based color grading using a 3D lookup texture. +// Input color should be in [0,1] range (post-tonemapping). +vec3 applyColorGrading(vec3 color, float intensity) { + if (intensity <= 0.0) return color; + + // Clamp to valid LUT range and apply half-texel offset for correct sampling + vec3 lutCoord = clamp(color, 0.0, 1.0); + + // Scale and bias for correct 3D LUT sampling (avoid edge texels) + const float LUT_SIZE = 32.0; + lutCoord = lutCoord * ((LUT_SIZE - 1.0) / LUT_SIZE) + 0.5 / LUT_SIZE; + + vec3 graded = texture(uColorLUT, lutCoord).rgb; + return mix(color, graded, intensity); +} + +// Film grain effect - adds animated noise +vec3 applyFilmGrain(vec3 color, vec2 uv, float intensity, float time) { + if (intensity <= 0.0) return color; + + // Generate grain using UV and time for animation + float grain = random(uv + time * 0.01); + + // Convert to signed noise centered around 0 + grain = (grain - 0.5) * 2.0; + + // Apply grain with intensity - subtle effect + return color + grain * intensity * 0.05; +} + void main() { vec3 hdrColor = texture(uHDRBuffer, inUV).rgb; @@ -125,5 +182,16 @@ void main() { color = 
ACESFilm(hdrColor * global.pbr_params.y); } + // Apply LUT-based color grading (after tone mapping, in [0,1] range) + if (postParams.colorGradingEnabled > 0.5) { + color = applyColorGrading(color, postParams.colorGradingIntensity); + } + + // Apply vignette effect + color = applyVignette(color, inUV, postParams.vignetteIntensity); + + // Apply film grain effect + color = applyFilmGrain(color, inUV, postParams.filmGrainIntensity, global.params.x); + outColor = vec4(color, 1.0); } diff --git a/assets/shaders/vulkan/terrain.frag b/assets/shaders/vulkan/terrain.frag index 480adad3..23d64aaa 100644 --- a/assets/shaders/vulkan/terrain.frag +++ b/assets/shaders/vulkan/terrain.frag @@ -32,6 +32,8 @@ layout(set = 0, binding = 0) uniform GlobalUniforms { vec4 pbr_params; // x = pbr_quality, y = exposure, z = saturation, w = ssao_strength vec4 volumetric_params; // x = enabled, y = density, z = steps, w = scattering vec4 viewport_size; // xy = width/height + vec4 lpv_params; // x = enabled, y = intensity, z = cell_size, w = grid_size + vec4 lpv_origin; // xyz = world origin } global; // Constants @@ -55,15 +57,16 @@ float cloudNoise(vec2 p) { return mix(mix(a, b, u.x), mix(c, d, u.x), u.y); } -float cloudFbm(vec2 p) { - float v = 0.0; - float a = 0.5; - for (int i = 0; i < 2; i++) { - v += a * cloudNoise(p); - p *= 2.0; - a *= 0.5; +float cloudFbm(vec2 p, int octaves) { + float value = 0.0; + float amplitude = 0.5; + float frequency = 1.0; + for (int i = 0; i < octaves; i++) { + value += amplitude * cloudNoise(p * frequency); + amplitude *= 0.5; + frequency *= 2.0; } - return v; + return value; } // 4x4 Bayer matrix for dithered LOD transitions @@ -80,10 +83,14 @@ float bayerDither4x4(vec2 position) { } float getCloudShadow(vec3 worldPos, vec3 sunDir) { + const float cloudBlockSize = 12.0; vec3 actualWorldPos = worldPos + global.cam_pos.xyz; vec2 shadowOffset = sunDir.xz * (global.cloud_params.x - actualWorldPos.y) / max(sunDir.y, 0.1); - vec2 samplePos = (actualWorldPos.xz + 
shadowOffset + global.cloud_wind_offset.xy) * global.cloud_wind_offset.z; - float cloudValue = cloudFbm(samplePos * 0.5); + vec2 worldXZ = actualWorldPos.xz + shadowOffset + global.cloud_wind_offset.xy; + // Apply block quantization to match cloud rendering + vec2 pixelPos = floor(worldXZ / cloudBlockSize) * cloudBlockSize; + vec2 samplePos = pixelPos * global.cloud_wind_offset.z; + float cloudValue = cloudFbm(samplePos, 3); float threshold = 1.0 - global.cloud_wind_offset.w; float cloudMask = smoothstep(threshold - 0.1, threshold + 0.1, cloudValue); return cloudMask * global.lighting.w; @@ -95,11 +102,15 @@ layout(set = 0, binding = 7) uniform sampler2D uRoughnessMap; // Roughness ma layout(set = 0, binding = 8) uniform sampler2D uDisplacementMap; // Displacement map (unused for now) layout(set = 0, binding = 9) uniform sampler2D uEnvMap; // Environment Map (EXR) layout(set = 0, binding = 10) uniform sampler2D uSSAOMap; // SSAO Map +layout(set = 0, binding = 11) uniform sampler3D uLPVGrid; // LPV SH Red channel (4 SH coefficients) +layout(set = 0, binding = 12) uniform sampler3D uLPVGridG; // LPV SH Green channel +layout(set = 0, binding = 13) uniform sampler3D uLPVGridB; // LPV SH Blue channel layout(set = 0, binding = 2) uniform ShadowUniforms { - mat4 light_space_matrices[3]; + mat4 light_space_matrices[4]; vec4 cascade_splits; vec4 shadow_texel_sizes; + vec4 shadow_params; // x = light_size (PCSS), y/z/w reserved } shadows; layout(set = 0, binding = 3) uniform sampler2DArrayShadow uShadowMaps; @@ -136,18 +147,19 @@ float interleavedGradientNoise(vec2 fragCoord) { return fract(magic.z * fract(dot(fragCoord.xy, magic.xy))); } -float findBlocker(vec2 uv, float zReceiver, int layer) { +// PCSS blocker search using Poisson disk for better spatial distribution. +// searchRadius is derived from light size and receiver depth in light-space. 
+float findBlocker(vec2 uv, float zReceiver, int layer, float searchRadius, mat2 rot) { float blockerDepthSum = 0.0; int numBlockers = 0; - float searchRadius = 0.0015; - for (int i = -1; i <= 1; i++) { - for (int j = -1; j <= 1; j++) { - vec2 offset = vec2(i, j) * searchRadius; - float depth = texture(uShadowMapsRegular, vec3(uv + offset, float(layer))).r; - if (depth > zReceiver + 0.0001) { - blockerDepthSum += depth; - numBlockers++; - } + // Use first 8 Poisson samples for blocker search (cheaper than full 16) + for (int i = 0; i < 8; i++) { + vec2 offset = (rot * poissonDisk16[i]) * searchRadius; + float depth = texture(uShadowMapsRegular, vec3(uv + offset, float(layer))).r; + // Reverse-Z: blockers have GREATER depth than receiver + if (depth > zReceiver + 0.0002) { + blockerDepthSum += depth; + numBlockers++; } } if (numBlockers == 0) return -1.0; @@ -172,24 +184,46 @@ float computeShadowFactor(vec3 fragPosWorld, vec3 N, vec3 L, int layer) { float tanTheta = sinTheta / NdotL; // Reverse-Z Bias: push fragment CLOSER to light (towards Near=1.0) - const float BASE_BIAS = 0.0015; + const float BASE_BIAS = 0.0025; const float SLOPE_BIAS = 0.003; - const float MAX_BIAS = 0.012; + const float MAX_BIAS = 0.015; float bias = BASE_BIAS * cascadeScale + SLOPE_BIAS * min(tanTheta, 5.0) * cascadeScale; bias = min(bias, MAX_BIAS); if (vTileID < 0) bias = max(bias, 0.006 * cascadeScale); + // Noise rotation for temporal stability float angle = interleavedGradientNoise(gl_FragCoord.xy) * PI * 0.25; float s = sin(angle); - float c = cos(angle); - mat2 rot = mat2(c, s, -s, c); - + float co = cos(angle); + mat2 rot = mat2(co, s, -s, co); + + // PCSS: Percentage-Closer Soft Shadows + // lightSize in shadow-map UV space, scaled per cascade + float lightSize = shadows.shadow_params.x * texelSize; + const float MIN_RADIUS = 0.0005; + const float MAX_RADIUS = 0.008; + + // Step 1: Blocker search with light-size-proportional search radius + float searchRadius = lightSize * 2.0 * 
cascadeScale; + searchRadius = clamp(searchRadius, MIN_RADIUS, MAX_RADIUS); + float avgBlockerDepth = findBlocker(projCoords.xy, currentDepth, layer, searchRadius, rot); + + float radius; + if (avgBlockerDepth < 0.0) { + // No blockers found — use minimum PCF radius for contact hardening + radius = MIN_RADIUS * cascadeScale; + } else { + // Step 2: Penumbra estimation + // Reverse-Z: blocker depth > receiver depth means blocker is closer to light + float penumbraWidth = (avgBlockerDepth - currentDepth) / max(avgBlockerDepth, 0.0001) * lightSize; + radius = clamp(penumbraWidth * cascadeScale, MIN_RADIUS * cascadeScale, MAX_RADIUS * cascadeScale); + } + + // Step 3: Variable-radius PCF filtering float shadow = 0.0; - float radius = 0.0015 * cascadeScale; for (int i = 0; i < 16; i++) { vec2 offset = (rot * poissonDisk16[i]) * radius; - // GREATER_OR_EQUAL comparison: returns 1.0 if (currentDepth + bias) >= mapDepth shadow += texture(uShadowMaps, vec4(projCoords.xy + offset, float(layer), currentDepth + bias)); } // shadow factor: 1.0 (Shadowed) to 0.0 (Lit) @@ -271,6 +305,44 @@ vec3 computeIBLAmbient(vec3 N, float roughness) { return textureLod(uEnvMap, envUV, envMipLevel).rgb; } +// SH L1 constants for irradiance reconstruction +const float LPV_SH_C0 = 0.282095; +const float LPV_SH_C1 = 0.488603; + +// Evaluate SH L1 irradiance for a given direction +float evaluateLPVSH(vec4 sh, vec3 dir) { + return max(0.0, sh.x * LPV_SH_C0 + sh.y * LPV_SH_C1 * dir.x + sh.z * LPV_SH_C1 * dir.y + sh.w * LPV_SH_C1 * dir.z); +} + +// Sample the native 3D LPV SH grid and reconstruct directional irradiance using surface normal. 
+vec3 sampleLPVAtlas(vec3 worldPos, vec3 normal) { + if (global.lpv_params.x < 0.5) return vec3(0.0); + + float gridSize = max(global.lpv_params.w, 1.0); + float cellSize = max(global.lpv_params.z, 0.001); + vec3 local = (worldPos - global.lpv_origin.xyz) / cellSize; + + if (any(lessThan(local, vec3(0.0))) || any(greaterThanEqual(local, vec3(gridSize)))) { + return vec3(0.0); + } + + // Normalize to [0,1] UV range for hardware trilinear sampling + vec3 uvw = (local + 0.5) / gridSize; + + // Sample 4 SH coefficients per color channel + vec4 sh_r = texture(uLPVGrid, uvw); + vec4 sh_g = texture(uLPVGridG, uvw); + vec4 sh_b = texture(uLPVGridB, uvw); + + // Reconstruct directional irradiance using the surface normal + float irr_r = evaluateLPVSH(sh_r, normal); + float irr_g = evaluateLPVSH(sh_g, normal); + float irr_b = evaluateLPVSH(sh_b, normal); + + // Clamp to prevent overexposure from accumulated SH values + return clamp(vec3(irr_r, irr_g, irr_b) * global.lpv_params.y, vec3(0.0), vec3(2.0)); +} + vec3 computeBRDF(vec3 albedo, vec3 N, vec3 V, vec3 L, float roughness) { vec3 H = normalize(V + L); vec3 F0 = mix(vec3(DIELECTRIC_F0), albedo, 0.0); @@ -301,14 +373,16 @@ vec3 computePBR(vec3 albedo, vec3 N, vec3 V, vec3 L, float roughness, float tota vec3 Lo = brdf * sunColor * NdotL_final * (1.0 - totalShadow); vec3 envColor = computeIBLAmbient(N, roughness); float shadowAmbientFactor = mix(1.0, 0.2, totalShadow); - vec3 ambientColor = albedo * (max(min(envColor, IBL_CLAMP) * skyLight * 0.8, vec3(global.lighting.x * 0.8)) + blockLight) * ao * ssao * shadowAmbientFactor; + vec3 indirect = sampleLPVAtlas(vFragPosWorld, N); + vec3 ambientColor = albedo * (max(min(envColor, IBL_CLAMP) * skyLight * 0.8, vec3(global.lighting.x * 0.8)) + blockLight + indirect) * ao * ssao * shadowAmbientFactor; return ambientColor + Lo; } vec3 computeNonPBR(vec3 albedo, vec3 N, float nDotL, float totalShadow, float skyLight, vec3 blockLight, float ao, float ssao) { vec3 envColor = 
computeIBLAmbient(N, NON_PBR_ROUGHNESS); float shadowAmbientFactor = mix(1.0, 0.2, totalShadow); - vec3 ambientColor = albedo * (max(min(envColor, IBL_CLAMP) * skyLight * 0.8, vec3(global.lighting.x * 0.8)) + blockLight) * ao * ssao * shadowAmbientFactor; + vec3 indirect = sampleLPVAtlas(vFragPosWorld, N); + vec3 ambientColor = albedo * (max(min(envColor, IBL_CLAMP) * skyLight * 0.8, vec3(global.lighting.x * 0.8)) + blockLight + indirect) * ao * ssao * shadowAmbientFactor; vec3 sunColor = global.sun_color.rgb * global.params.w * SUN_RADIANCE_TO_IRRADIANCE / PI; vec3 directColor = albedo * sunColor * nDotL * (1.0 - totalShadow); return ambientColor + directColor; @@ -316,7 +390,8 @@ vec3 computeNonPBR(vec3 albedo, vec3 N, float nDotL, float totalShadow, float sk vec3 computeLOD(vec3 albedo, float nDotL, float totalShadow, float skyLightVal, vec3 blockLight, float ao, float ssao) { float shadowAmbientFactor = mix(1.0, 0.2, totalShadow); - vec3 ambientColor = albedo * (max(vec3(skyLightVal * 0.8), vec3(global.lighting.x * 0.4)) + blockLight) * ao * ssao * shadowAmbientFactor; + vec3 indirect = sampleLPVAtlas(vFragPosWorld, vec3(0.0, 1.0, 0.0)); // LOD uses up-facing normal + vec3 ambientColor = albedo * (max(vec3(skyLightVal * 0.8), vec3(global.lighting.x * 0.4)) + blockLight + indirect) * ao * ssao * shadowAmbientFactor; vec3 sunColor = global.sun_color.rgb * global.params.w * SUN_VOLUMETRIC_INTENSITY / PI; vec3 directColor = albedo * sunColor * nDotL * (1.0 - totalShadow); return ambientColor + directColor; @@ -400,7 +475,14 @@ void main() { vec3 L = normalize(global.sun_dir.xyz); float nDotL = max(dot(N, L), 0.0); - int layer = viewDistance < shadows.cascade_splits[0] ? 0 : (viewDistance < shadows.cascade_splits[1] ? 1 : 2); + // NaN guard: if cascade_splits contain NaN, comparisons return false and + // we fall through to cascade 2 (widest). Also guard against zero/negative + // splits which indicate uninitialized or invalid cascade data. 
+ int layer = 2; + if (shadows.cascade_splits[0] > 0.0 && shadows.cascade_splits[1] > 0.0) { + layer = viewDistance < shadows.cascade_splits[0] ? 0 + : (viewDistance < shadows.cascade_splits[1] ? 1 : 2); + } float shadowFactor = computeShadowCascades(vFragPosWorld, N, L, viewDistance, layer); float cloudShadow = (global.cloud_params.w > 0.5 && global.params.w > 0.05 && global.sun_dir.y > 0.05) ? getCloudShadow(vFragPosWorld, global.sun_dir.xyz) : 0.0; diff --git a/assets/shaders/vulkan/terrain.frag.spv b/assets/shaders/vulkan/terrain.frag.spv index 3f523a8c..1ca94fd6 100644 Binary files a/assets/shaders/vulkan/terrain.frag.spv and b/assets/shaders/vulkan/terrain.frag.spv differ diff --git a/assets/shaders/vulkan/terrain.vert b/assets/shaders/vulkan/terrain.vert index 137b879f..54d42ec2 100644 --- a/assets/shaders/vulkan/terrain.vert +++ b/assets/shaders/vulkan/terrain.vert @@ -39,6 +39,8 @@ layout(set = 0, binding = 0) uniform GlobalUniforms { vec4 pbr_params; // x = pbr_quality, y = exposure, z = saturation, w = ssao_strength vec4 volumetric_params; // x = enabled, y = density, z = steps, w = scattering vec4 viewport_size; // xy = width/height + vec4 lpv_params; // x = enabled, y = intensity, z = cell_size, w = grid_size + vec4 lpv_origin; // xyz = world origin } global; layout(push_constant) uniform ModelUniforms { @@ -69,7 +71,14 @@ void main() { vBlockLight = aBlockLight; vFragPosWorld = worldPos.xyz; - vViewDepth = vDistance; + // Calculate actual view-space Z depth for cascade selection + // This aligns with how CSM splits are calculated (view-space Z) + vec4 viewPos = global.view_proj * vec4(worldPos.xyz, 1.0); + // In reverse-Z, view-space Z increases as we go deeper into the scene + // We need the actual view-space depth, not clip-space + // Transform world position to view space + vec3 toCamera = worldPos.xyz - global.cam_pos.xyz; + vViewDepth = length(toCamera); vAO = aAO; vMaskRadius = model_data.mask_radius; diff --git 
a/assets/shaders/vulkan/terrain.vert.spv b/assets/shaders/vulkan/terrain.vert.spv index d88a1f5e..dc977470 100644 Binary files a/assets/shaders/vulkan/terrain.vert.spv and b/assets/shaders/vulkan/terrain.vert.spv differ diff --git a/assets/textures/default/lava.png b/assets/textures/default/lava.png new file mode 100644 index 00000000..dcc4906d Binary files /dev/null and b/assets/textures/default/lava.png differ diff --git a/assets/textures/default/torch.png b/assets/textures/default/torch.png new file mode 100644 index 00000000..bed8a777 Binary files /dev/null and b/assets/textures/default/torch.png differ diff --git a/build.zig b/build.zig index 63401c76..d1a85430 100644 --- a/build.zig +++ b/build.zig @@ -52,7 +52,7 @@ pub fn build(b: *std.Build) void { b.installArtifact(exe); - const shader_cmd = b.addSystemCommand(&.{ "sh", "-c", "for f in assets/shaders/vulkan/*.vert assets/shaders/vulkan/*.frag; do glslangValidator -V \"$f\" -o \"$f.spv\"; done" }); + const shader_cmd = b.addSystemCommand(&.{ "sh", "-c", "for f in assets/shaders/vulkan/*.vert assets/shaders/vulkan/*.frag assets/shaders/vulkan/*.comp; do glslangValidator -V \"$f\" -o \"$f.spv\"; done" }); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); @@ -176,6 +176,8 @@ pub fn build(b: *std.Build) void { const validate_vulkan_ssao_frag = b.addSystemCommand(&.{ "glslangValidator", "-V", "assets/shaders/vulkan/ssao.frag" }); const validate_vulkan_ssao_blur_frag = b.addSystemCommand(&.{ "glslangValidator", "-V", "assets/shaders/vulkan/ssao_blur.frag" }); const validate_vulkan_g_pass_frag = b.addSystemCommand(&.{ "glslangValidator", "-V", "assets/shaders/vulkan/g_pass.frag" }); + const validate_vulkan_lpv_inject_comp = b.addSystemCommand(&.{ "glslangValidator", "-V", "assets/shaders/vulkan/lpv_inject.comp" }); + const validate_vulkan_lpv_propagate_comp = b.addSystemCommand(&.{ "glslangValidator", "-V", "assets/shaders/vulkan/lpv_propagate.comp" }); 
test_step.dependOn(&validate_vulkan_terrain_vert.step); test_step.dependOn(&validate_vulkan_terrain_frag.step); @@ -195,4 +197,6 @@ pub fn build(b: *std.Build) void { test_step.dependOn(&validate_vulkan_ssao_frag.step); test_step.dependOn(&validate_vulkan_ssao_blur_frag.step); test_step.dependOn(&validate_vulkan_g_pass_frag.step); + test_step.dependOn(&validate_vulkan_lpv_inject_comp.step); + test_step.dependOn(&validate_vulkan_lpv_propagate_comp.step); } diff --git a/docs/refactoring/PR1_DEVICE_SWAPCHAIN.md b/docs/refactoring/PR1_DEVICE_SWAPCHAIN.md new file mode 100644 index 00000000..281c480a --- /dev/null +++ b/docs/refactoring/PR1_DEVICE_SWAPCHAIN.md @@ -0,0 +1,200 @@ +# PR 1: Pipeline + Render Pass Extraction + +**Status:** 🔄 In Progress +**Branch:** `refactor/pr1-pipeline-renderpass` +**Replaces:** Original "Device + Swapchain" plan (already modular) + +## Overview + +Extract pipeline and render pass management from `rhi_vulkan.zig` into dedicated managers. This is the first step in eliminating the god object anti-pattern. + +**Note:** Device and swapchain are already well-modularized in `vulkan_device.zig` and `vulkan_swapchain.zig`. This PR focuses on the next layer: pipelines and render passes. + +## Goals + +1. ✅ Create `PipelineManager` to encapsulate all pipeline creation and management +2. ✅ Create `RenderPassManager` to encapsulate render pass and framebuffer management +3. 🔄 Integrate managers into `rhi_vulkan.zig` +4. 🔄 Remove pipeline/renderpass fields from `VulkanContext` +5. 
🔄 Reduce `rhi_vulkan.zig` by ~1,200 lines + +## Files + +### New Files (Created ✓) +- `src/engine/graphics/vulkan/pipeline_manager.zig` (~650 lines) +- `src/engine/graphics/vulkan/render_pass_manager.zig` (~550 lines) + +### Modified Files +- `src/engine/graphics/rhi_vulkan.zig` (Integration in progress) + +## Current Status + +### ✅ Completed +- [x] Created `PipelineManager` with all pipeline types: + - Terrain, wireframe, selection, line pipelines + - G-Pass pipeline + - Sky pipeline + - UI pipelines (colored + textured) + - Cloud pipeline + - Debug shadow pipeline (conditional) + - All pipeline layouts + +- [x] Created `RenderPassManager` with: + - HDR render pass (with MSAA support) + - G-Pass render pass + - Post-process render pass + - UI swapchain render pass + - Framebuffer management for all passes + +### 🔄 In Progress +- [ ] Add manager fields to `VulkanContext`: + ```zig + pipeline_manager: PipelineManager, + render_pass_manager: RenderPassManager, + ``` + +- [ ] Replace inline creation calls: + ```zig + // Before: + try createMainRenderPass(ctx); + try createMainPipelines(ctx); + + // After: + try ctx.render_pass_manager.createMainRenderPass(vk_device, extent, msaa_samples); + try ctx.pipeline_manager.createMainPipelines(allocator, vk_device, render_pass, g_render_pass, msaa_samples); + ``` + +- [ ] Update all field accesses: + ```zig + // Before: + ctx.pipeline + ctx.hdr_render_pass + ctx.main_framebuffer + + // After: + ctx.pipeline_manager.terrain_pipeline + ctx.render_pass_manager.hdr_render_pass + ctx.render_pass_manager.main_framebuffer + ``` + +### 📋 Remaining +- [ ] Update cleanup code to use manager `deinit()` methods +- [ ] Remove old pipeline/renderpass fields from VulkanContext +- [ ] Remove old creation/destruction functions from rhi_vulkan.zig +- [ ] Run full test suite + +## Fields to Remove from VulkanContext + +### Pipeline Fields (~15 fields) +- `pipeline_layout` → Use `pipeline_manager.pipeline_layout` +- `pipeline` → Use 
`pipeline_manager.terrain_pipeline` +- `wireframe_pipeline` → Use `pipeline_manager.wireframe_pipeline` +- `selection_pipeline` → Use `pipeline_manager.selection_pipeline` +- `line_pipeline` → Use `pipeline_manager.line_pipeline` +- `sky_pipeline` → Use `pipeline_manager.sky_pipeline` +- `sky_pipeline_layout` → Use `pipeline_manager.sky_pipeline_layout` +- `ui_pipeline` → Use `pipeline_manager.ui_pipeline` +- `ui_pipeline_layout` → Use `pipeline_manager.ui_pipeline_layout` +- `ui_tex_pipeline` → Use `pipeline_manager.ui_tex_pipeline` +- `ui_tex_pipeline_layout` → Use `pipeline_manager.ui_tex_pipeline_layout` +- `ui_swapchain_pipeline` → Use `pipeline_manager.ui_swapchain_pipeline` +- `ui_swapchain_tex_pipeline` → Use `pipeline_manager.ui_swapchain_tex_pipeline` +- `cloud_pipeline` → Use `pipeline_manager.cloud_pipeline` +- `cloud_pipeline_layout` → Use `pipeline_manager.cloud_pipeline_layout` +- `g_pipeline` → Use `pipeline_manager.g_pipeline` +- `g_pipeline_layout` → Use `pipeline_manager.pipeline_layout` (shares main layout) + +### Render Pass Fields (~8 fields) +- `hdr_render_pass` → Use `render_pass_manager.hdr_render_pass` +- `g_render_pass` → Use `render_pass_manager.g_render_pass` +- `post_process_render_pass` → Use `render_pass_manager.post_process_render_pass` +- `ui_swapchain_render_pass` → Use `render_pass_manager.ui_swapchain_render_pass` +- `main_framebuffer` → Use `render_pass_manager.main_framebuffer` +- `g_framebuffer` → Use `render_pass_manager.g_framebuffer` +- `post_process_framebuffers` → Use `render_pass_manager.post_process_framebuffers` +- `ui_swapchain_framebuffers` → Use `render_pass_manager.ui_swapchain_framebuffers` + +**Total fields removed:** ~23 fields + +## Testing Checklist + +- [ ] `nix develop --command zig build` compiles +- [ ] `nix develop --command zig build test` passes +- [ ] `nix develop --command zig build test-integration` passes +- [ ] Manual test: Application runs and renders correctly +- [ ] Manual test: Window resize 
works (tests swapchain recreation) +- [ ] Manual test: MSAA toggle works (tests pipeline recreation) +- [ ] Manual test: All rendering features work (shadows, SSAO, bloom, FXAA) + +## Migration Path + +### Step 1: Add Managers +```zig +const VulkanContext = struct { + // Existing subsystems (keep these): + vulkan_device: VulkanDevice, + swapchain: SwapchainPresenter, + resources: ResourceManager, + frames: FrameManager, + descriptors: DescriptorManager, + + // NEW: Add managers + pipeline_manager: PipelineManager, + render_pass_manager: RenderPassManager, + + // ... other fields +}; +``` + +### Step 2: Initialize Managers +```zig +fn initContext(...) !void { + // Existing initialization: + ctx.vulkan_device = try VulkanDevice.init(allocator, ctx.window); + ctx.swapchain = try SwapchainPresenter.init(...); + // ... + + // NEW: Initialize managers + ctx.pipeline_manager = try PipelineManager.init(&ctx.vulkan_device, &ctx.descriptors, null); + ctx.render_pass_manager = RenderPassManager.init(allocator); +} +``` + +### Step 3: Use Managers +```zig +// Creating resources: +try ctx.render_pass_manager.createMainRenderPass(vk_device, extent, msaa_samples); +try ctx.pipeline_manager.createMainPipelines(allocator, vk_device, + ctx.render_pass_manager.hdr_render_pass, + ctx.render_pass_manager.g_render_pass, + msaa_samples); + +// Accessing resources: +const pipeline = ctx.pipeline_manager.terrain_pipeline; +const render_pass = ctx.render_pass_manager.hdr_render_pass; +``` + +## Related PRs + +- PR 2: Post-Processing System Extraction (HDR, Bloom, FXAA consolidation) +- PR 3: UI Rendering System Extraction +- PR 4: Final Coordinator Refactor + +## Estimated Impact + +| Metric | Before | After | Change | +|--------|--------|-------|--------| +| rhi_vulkan.zig lines | 5,228 | ~4,000 | -1,200 | +| VulkanContext fields | ~100 | ~77 | -23 | +| New module lines | 0 | ~1,200 | +1,200 | +| **Net change** | | | **~0** (reorganization) | + +**Risk:** Medium (touches many rendering 
code paths, but changes are mechanical) + +## Notes + +- PipelineManager and RenderPassManager are already created and compile successfully +- Integration requires updating many references throughout rhi_vulkan.zig +- Each field access change is mechanical but there are many of them +- Consider using find/replace or refactoring tools for bulk changes + diff --git a/docs/refactoring/PR2_MANAGER_MIGRATION.md b/docs/refactoring/PR2_MANAGER_MIGRATION.md new file mode 100644 index 00000000..bcd3ce94 --- /dev/null +++ b/docs/refactoring/PR2_MANAGER_MIGRATION.md @@ -0,0 +1,109 @@ +# PR 2: Migrate to Pipeline and Render Pass Managers + +**Status:** 🔄 Draft (Incremental Commits) +**Branch:** `refactor/pr2-manager-migration` +**Depends on:** PR 1 (merged) + +## Overview + +Migrate `rhi_vulkan.zig` to actually **use** the PipelineManager and RenderPassManager created in PR1. This PR eliminates the duplication between the old inline functions and the new manager modules. + +## Goals + +1. Replace inline render pass creation with manager calls +2. Replace inline pipeline creation with manager calls +3. Update all field references to use managers +4. Remove ~800 lines of old code +5. 
Remove ~25 fields from VulkanContext + +## Incremental Commit Plan + +### Commit 1: Migrate HDR Render Pass Creation +- Replace `createMainRenderPass()` call with `ctx.render_pass_manager.createMainRenderPass()` +- Update `hdr_render_pass` references to use manager +- Remove old `createMainRenderPass()` function + +### Commit 2: Migrate G-Pass Render Pass Creation +- Replace G-Pass render pass creation in `createGPassResources()` +- Update `g_render_pass` references to use manager + +### Commit 3: Migrate Main Pipeline Creation +- Replace `createMainPipelines()` with `ctx.pipeline_manager.createMainPipelines()` +- Update terrain, wireframe, selection, line pipeline references +- Remove old `createMainPipelines()` function + +### Commit 4: Migrate UI and Cloud Pipeline Creation +- Update UI pipeline creation to use manager +- Update cloud pipeline creation to use manager +- Update swapchain UI pipeline creation + +### Commit 5: Cleanup Old Fields and Functions +- Remove old pipeline fields from VulkanContext +- Remove old render pass fields from VulkanContext +- Remove old creation/destruction functions +- Update any remaining references + +### Commit 6: Testing and Fixes +- Run full test suite +- Fix any regressions +- Final polish + +## Field Migration Map + +### Render Passes (moving to RenderPassManager) +```zig +// Before: +ctx.hdr_render_pass → ctx.render_pass_manager.hdr_render_pass +ctx.g_render_pass → ctx.render_pass_manager.g_render_pass +ctx.post_process_render_pass → ctx.render_pass_manager.post_process_render_pass +ctx.ui_swapchain_render_pass → ctx.render_pass_manager.ui_swapchain_render_pass + +// Framebuffers: +ctx.main_framebuffer → ctx.render_pass_manager.main_framebuffer +ctx.g_framebuffer → ctx.render_pass_manager.g_framebuffer +ctx.post_process_framebuffers → ctx.render_pass_manager.post_process_framebuffers +ctx.ui_swapchain_framebuffers → ctx.render_pass_manager.ui_swapchain_framebuffers +``` + +### Pipelines (moving to PipelineManager) 
+```zig +// Before: +ctx.pipeline → ctx.pipeline_manager.terrain_pipeline +ctx.wireframe_pipeline → ctx.pipeline_manager.wireframe_pipeline +ctx.selection_pipeline → ctx.pipeline_manager.selection_pipeline +ctx.line_pipeline → ctx.pipeline_manager.line_pipeline +ctx.sky_pipeline → ctx.pipeline_manager.sky_pipeline +ctx.g_pipeline → ctx.pipeline_manager.g_pipeline +ctx.ui_pipeline → ctx.pipeline_manager.ui_pipeline +ctx.ui_tex_pipeline → ctx.pipeline_manager.ui_tex_pipeline +ctx.cloud_pipeline → ctx.pipeline_manager.cloud_pipeline +ctx.ui_swapchain_pipeline → ctx.pipeline_manager.ui_swapchain_pipeline +ctx.ui_swapchain_tex_pipeline → ctx.pipeline_manager.ui_swapchain_tex_pipeline + +// Layouts: +ctx.pipeline_layout → ctx.pipeline_manager.pipeline_layout +ctx.sky_pipeline_layout → ctx.pipeline_manager.sky_pipeline_layout +ctx.ui_pipeline_layout → ctx.pipeline_manager.ui_pipeline_layout +ctx.ui_tex_pipeline_layout → ctx.pipeline_manager.ui_tex_pipeline_layout +ctx.cloud_pipeline_layout → ctx.pipeline_manager.cloud_pipeline_layout +``` + +## Testing Checklist + +Each commit must: +- [ ] `nix develop --command zig build` compiles +- [ ] `nix develop --command zig build test` passes +- [ ] Manual test: Application runs and renders + +## Expected Impact + +| Metric | Before | After | +|--------|--------|-------| +| rhi_vulkan.zig lines | 5,238 | ~4,400 | +| VulkanContext fields | ~100 | ~75 | +| Creation functions | 4 | 0 (all in managers) | + +## Related + +- PR 1: Created PipelineManager and RenderPassManager modules +- Issue #244: RHI Vulkan refactoring diff --git a/docs/refactoring/REFACTORING_PLAN.md b/docs/refactoring/REFACTORING_PLAN.md new file mode 100644 index 00000000..f267829d --- /dev/null +++ b/docs/refactoring/REFACTORING_PLAN.md @@ -0,0 +1,237 @@ +# RHI Vulkan Refactoring Plan + +**Issue:** [#244](https://github.com/OpenStaticFish/ZigCraft/issues/244) - Refactor rhi_vulkan.zig to eliminate god object anti-pattern + +**Goal:** Reduce `rhi_vulkan.zig` from 
5,228 lines to <800 lines with <30 field `VulkanContext` + +**Current State:** +- File: `src/engine/graphics/rhi_vulkan.zig` +- Lines: 5,228 +- `VulkanContext` fields: ~100+ +- Anti-pattern: God object holding all Vulkan state + +**Target State:** +- File: `src/engine/graphics/rhi_vulkan.zig` +- Lines: <800 +- `VulkanContext` fields: <30 +- Pattern: Coordinator delegating to focused subsystems + +--- + +## PR Breakdown + +### PR 1: Pipeline + Render Pass Extraction +**Status:** 🔄 In Progress +**Scope:** Extract pipeline and render pass management into dedicated managers +**Note:** Device and swapchain are already modular; this PR focuses on pipelines/render passes + +**Files:** +- `src/engine/graphics/vulkan/pipeline_manager.zig` (CREATED ✓) +- `src/engine/graphics/vulkan/render_pass_manager.zig` (CREATED ✓) +- `src/engine/graphics/rhi_vulkan.zig` (Integration in progress) + +**Extraction Targets:** +- [x] Create PipelineManager with all pipeline types and layouts +- [x] Create RenderPassManager with all render passes and framebuffers +- [ ] Integrate managers into VulkanContext +- [ ] Replace all pipeline/renderpass field accesses +- [ ] Remove old fields and functions from rhi_vulkan.zig + +**Expected Reduction:** ~1,200 lines from rhi_vulkan.zig, ~23 fields from VulkanContext + +--- + +### PR 2: Render Passes + Pipelines +**Status:** 📋 Planned +**Scope:** Extract render pass and pipeline management + +**Files:** +- `src/engine/graphics/vulkan/pipeline_manager.zig` (CREATED ✓) +- `src/engine/graphics/vulkan/render_pass_manager.zig` (CREATED ✓) +- `src/engine/graphics/rhi_vulkan.zig` (REFACTOR) + +**Extraction Targets:** +- [ ] `createMainRenderPass()` - HDR render pass creation +- [ ] `createGPassResources()` - G-Pass render pass and images +- [ ] `createMainPipelines()` - All graphics pipelines +- [ ] `createSwapchainUIPipelines()` - UI pipelines +- [ ] Pipeline layout creation +- [ ] Framebuffer management + +**Expected Reduction:** ~1,200 lines from 
rhi_vulkan.zig + +--- + +### PR 3: Resource Management + Post-Processing +**Status:** 📋 Planned +**Scope:** Extract resource management and consolidate post-processing + +**Files:** +- `src/engine/graphics/vulkan/resource_manager.zig` (ENHANCE) +- `src/engine/graphics/vulkan/post_process_system.zig` (NEW) +- `src/engine/graphics/rhi_vulkan.zig` (REFACTOR) + +**Extraction Targets:** +- [ ] HDR resource management (images, views, memory) +- [ ] Post-process render pass and descriptors +- [ ] Bloom system coordination +- [ ] FXAA system coordination +- [ ] SSAO integration +- [ ] Texture/sampler management consolidation + +**Expected Reduction:** ~1,500 lines from rhi_vulkan.zig + +--- + +### PR 4: UI Rendering System +**Status:** 📋 Planned +**Scope:** Extract UI rendering into dedicated subsystem + +**Files:** +- `src/engine/graphics/vulkan/ui_rendering_system.zig` (NEW) +- `src/engine/graphics/rhi_vulkan.zig` (REFACTOR) + +**Extraction Targets:** +- [ ] UI VBO management +- [ ] UI pipeline binding +- [ ] UI descriptor set management +- [ ] 2D drawing functions (`begin2DPass`, `drawRect2D`, etc.) 
+- [ ] Textured UI rendering + +**Expected Reduction:** ~600 lines from rhi_vulkan.zig + +--- + +### PR 5: Final Coordinator Refactor +**Status:** 📋 Planned +**Scope:** Final cleanup and coordinator pattern implementation + +**Files:** +- `src/engine/graphics/rhi_vulkan.zig` (MAJOR REFACTOR) +- All subsystem files (MINOR UPDATES) + +**Tasks:** +- [ ] Reduce `VulkanContext` to ~25 fields +- [ ] Convert to pure coordinator (no direct Vulkan calls) +- [ ] Clean up imports and dead code +- [ ] Update documentation +- [ ] Verify all tests pass + +**Expected Final State:** +- rhi_vulkan.zig: <800 lines +- VulkanContext: <30 fields +- Clean separation of concerns + +--- + +## Subsystem Architecture + +``` +VulkanContext (Coordinator) +├── DeviceManager +│ ├── VulkanDevice (physical/logical device) +│ └── Device capabilities +├── SwapchainManager +│ ├── SwapchainPresenter +│ └── Present mode management +├── RenderPassManager +│ ├── HDR render pass +│ ├── G-Pass render pass +│ ├── Post-process render pass +│ └── UI render pass +├── PipelineManager +│ ├── Terrain pipeline +│ ├── Wireframe pipeline +│ ├── Selection pipeline +│ ├── Line pipeline +│ ├── G-Pass pipeline +│ ├── Sky pipeline +│ ├── UI pipelines +│ └── Cloud pipeline +├── ResourceManager (existing) +│ ├── Buffer management +│ ├── Texture management +│ └── Shader management +├── PostProcessSystem +│ ├── HDR resources +│ ├── BloomSystem +│ ├── FXAASystem +│ └── SSAOSystem +├── UIRenderingSystem +│ ├── UI VBOs +│ ├── UI pipelines +│ └── UI descriptors +├── FrameManager (existing) +├── DescriptorManager (existing) +├── ShadowSystem (existing) +└── Timing/Query system +``` + +--- + +## Testing Strategy + +Each PR must: +1. Compile without errors: `nix develop --command zig build` +2. Pass all unit tests: `nix develop --command zig build test` +3. Run integration test: `nix develop --command zig build test-integration` +4. 
Manual verification: Run application and verify rendering + +--- + +## Migration Guide + +### For PR 1 (Device + Swapchain): +```zig +// Before: +ctx.vulkan_device.init(...) +ctx.swapchain.recreate() + +// After: +ctx.device_manager.init(...) +ctx.swapchain_manager.recreate() +``` + +### For PR 2 (Render Passes + Pipelines): +```zig +// Before: +try createMainRenderPass(ctx); +try createMainPipelines(ctx); + +// After: +try ctx.render_pass_manager.createMainRenderPass(...); +try ctx.pipeline_manager.createMainPipelines(...); +``` + +### For PR 3 (Post-Processing): +```zig +// Before: +try createHDRResources(ctx); +ctx.bloom.init(...); + +// After: +try ctx.post_process_system.initHDR(...); +ctx.post_process_system.bloom.init(...); +``` + +--- + +## Progress Tracking + +| PR | Status | Lines Removed | Fields Removed | Tests Pass | +|----|--------|---------------|----------------|------------| +| 1 | 🔄 | - | - | - | +| 2 | 📋 | - | - | - | +| 3 | 📋 | - | - | - | +| 4 | 📋 | - | - | - | +| 5 | 📋 | - | - | - | +| **Total** | | **~4,400** | **~70** | **✓** | + +--- + +## Notes + +- Each PR should be reviewable independently +- No functional changes - purely structural refactoring +- Maintain backward compatibility with RHI interface +- Document any breaking changes in PR descriptions diff --git a/src/engine/core/log.zig b/src/engine/core/log.zig index f16a1f46..7e351374 100644 --- a/src/engine/core/log.zig +++ b/src/engine/core/log.zig @@ -1,6 +1,7 @@ //! Engine-wide logging system with severity levels. 
const std = @import("std"); +const builtin = @import("builtin"); pub const LogLevel = enum { trace, @@ -59,4 +60,4 @@ pub const Logger = struct { }; /// Global logger instance -pub var log = Logger.init(.debug); +pub var log = Logger.init(if (builtin.is_test) .err else .debug); diff --git a/src/engine/graphics/csm.zig b/src/engine/graphics/csm.zig index c036f57f..076a41a3 100644 --- a/src/engine/graphics/csm.zig +++ b/src/engine/graphics/csm.zig @@ -9,6 +9,37 @@ pub const ShadowCascades = struct { light_space_matrices: [CASCADE_COUNT]Mat4, cascade_splits: [CASCADE_COUNT]f32, texel_sizes: [CASCADE_COUNT]f32, + + /// Initialize with safe defaults (zero-initialized) + pub fn initZero() ShadowCascades { + return .{ + .light_space_matrices = .{Mat4.identity} ** CASCADE_COUNT, + .cascade_splits = .{0.0} ** CASCADE_COUNT, + .texel_sizes = .{0.0} ** CASCADE_COUNT, + }; + } + + /// Validate that all cascade data is finite and reasonable + pub fn isValid(self: ShadowCascades) bool { + for (0..CASCADE_COUNT) |i| { + // Check cascade splits are finite and increasing + if (!std.math.isFinite(self.cascade_splits[i])) return false; + if (self.cascade_splits[i] <= 0.0) return false; + if (i > 0 and self.cascade_splits[i] <= self.cascade_splits[i - 1]) return false; + + // Check texel sizes are finite and positive + if (!std.math.isFinite(self.texel_sizes[i])) return false; + if (self.texel_sizes[i] <= 0.0) return false; + + // Check light space matrices are finite + for (0..4) |row| { + for (0..4) |col| { + if (!std.math.isFinite(self.light_space_matrices[i].data[row][col])) return false; + } + } + } + return true; + } }; /// Computes stable cascaded shadow map matrices using texel snapping. @@ -26,21 +57,38 @@ pub const ShadowCascades = struct { /// - lambda=0.92 biases the split scheme toward logarithmic distribution. /// - min/max Z offsets are tuned to avoid clipping during camera motion. 
pub fn computeCascades(resolution: u32, camera_fov: f32, aspect: f32, near: f32, far: f32, sun_dir: Vec3, cam_view: Mat4, z_range_01: bool) ShadowCascades { - const lambda = 0.92; - const shadow_dist = far; + // Validate inputs to prevent division by zero + if (resolution == 0 or far <= near or near <= 0.0) { + return ShadowCascades.initZero(); + } - var cascades: ShadowCascades = .{ - .light_space_matrices = undefined, - .cascade_splits = undefined, - .texel_sizes = undefined, - }; + const shadow_dist = far; - // Calculate split distances (linear/log blend) - for (0..CASCADE_COUNT) |i| { - const p = @as(f32, @floatFromInt(i + 1)) / @as(f32, @floatFromInt(CASCADE_COUNT)); - const log_split = near * std.math.pow(f32, shadow_dist / near, p); - const lin_split = near + (shadow_dist - near) * p; - cascades.cascade_splits[i] = std.math.lerp(lin_split, log_split, lambda); + var cascades = ShadowCascades.initZero(); + + // Smart cascade split strategy based on shadow distance + // For large distances (>500), use fixed percentages for better coverage + // For smaller distances, use logarithmic distribution for better near-detail + const SMART_SPLIT_THRESHOLD: f32 = 500.0; + const use_fixed_splits = shadow_dist > SMART_SPLIT_THRESHOLD; + + if (use_fixed_splits) { + // Fixed percentage splits optimized for 4 cascades at large distances + // Splits at: 8%, 25%, 60%, 100% of shadow distance + // Gives cascade 0 more coverage for close-up detail (cave walls, etc.) 
+ const split_ratios = [4]f32{ 0.08, 0.25, 0.60, 1.0 }; + for (0..CASCADE_COUNT) |i| { + cascades.cascade_splits[i] = shadow_dist * split_ratios[i]; + } + } else { + // Logarithmic splits for smaller distances (better near-detail) + const lambda = 0.92; + for (0..CASCADE_COUNT) |i| { + const p = @as(f32, @floatFromInt(i + 1)) / @as(f32, @floatFromInt(CASCADE_COUNT)); + const log_split = near * std.math.pow(f32, shadow_dist / near, p); + const lin_split = near + (shadow_dist - near) * p; + cascades.cascade_splits[i] = std.math.lerp(lin_split, log_split, lambda); + } } // Calculate matrices for each cascade @@ -82,9 +130,21 @@ pub fn computeCascades(resolution: u32, camera_fov: f32, aspect: f32, near: f32, // Stabilize ortho bounds by snapping center to texel grid // ONLY snap X and Y. Snapping Z causes depth range shifts and flickering. + // + // Use relative-to-integer-origin snapping to maintain float32 precision + // at large world coordinates. Without this, @floor(large_value / small_texel) + // produces a huge integer that loses precision when multiplied back. 
+ // + // Decompose: center = integer_origin + fractional_offset + // Snap the fractional part to the texel grid (where float32 has full precision), + // then reconstruct: snapped = integer_origin + round_to_grid(fractional_offset) + const origin_x = @as(f32, @floatFromInt(@as(i32, @intFromFloat(center_ls.x)))); + const origin_y = @as(f32, @floatFromInt(@as(i32, @intFromFloat(center_ls.y)))); + const frac_x = center_ls.x - origin_x; + const frac_y = center_ls.y - origin_y; const center_snapped = Vec3.init( - @floor(center_ls.x / texel_size) * texel_size, - @floor(center_ls.y / texel_size) * texel_size, + origin_x + @floor(frac_x / texel_size) * texel_size, + origin_y + @floor(frac_y / texel_size) * texel_size, center_ls.z, ); @@ -126,9 +186,31 @@ pub fn computeCascades(resolution: u32, camera_fov: f32, aspect: f32, near: f32, last_split = split; } + // Validate results before returning - use runtime check instead of + // debug.assert so invalid data is caught in ReleaseFast builds too. + if (!cascades.isValid()) { + return ShadowCascades.initZero(); + } + return cascades; } +/// Validates cascade data and logs warnings if invalid +pub fn validateCascades(cascades: ShadowCascades, log_scope: anytype) bool { + if (cascades.isValid()) return true; + + log_scope.warn("Invalid shadow cascade data detected:", .{}); + for (0..CASCADE_COUNT) |i| { + if (!std.math.isFinite(cascades.cascade_splits[i])) { + log_scope.warn(" Cascade {} split is non-finite: {}", .{ i, cascades.cascade_splits[i] }); + } + if (!std.math.isFinite(cascades.texel_sizes[i])) { + log_scope.warn(" Cascade {} texel size is non-finite: {}", .{ i, cascades.texel_sizes[i] }); + } + } + return false; +} + test "computeCascades splits and texel sizes" { const cascades = computeCascades( 1024, diff --git a/src/engine/graphics/lpv_system.zig b/src/engine/graphics/lpv_system.zig new file mode 100644 index 00000000..83538c76 --- /dev/null +++ b/src/engine/graphics/lpv_system.zig @@ -0,0 +1,1126 @@ +const std = 
@import("std"); +const c = @import("../../c.zig").c; +const rhi_pkg = @import("rhi.zig"); +const Vec3 = @import("../math/vec3.zig").Vec3; +const World = @import("../../world/world.zig").World; +const CHUNK_SIZE_X = @import("../../world/chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../../world/chunk.zig").CHUNK_SIZE_Y; +const CHUNK_SIZE_Z = @import("../../world/chunk.zig").CHUNK_SIZE_Z; +const block_registry = @import("../../world/block_registry.zig"); +const VulkanContext = @import("vulkan/rhi_context_types.zig").VulkanContext; +const Utils = @import("vulkan/utils.zig"); + +const MAX_LIGHTS_PER_UPDATE: usize = 2048; +// Approximate 1/7 spread for 6-neighbor propagation (close to 1/6 with extra damping) +// to keep indirect light stable and avoid runaway amplification. +const DEFAULT_PROPAGATION_FACTOR: f32 = 0.14; +// Retain 82% of center-cell energy so propagation does not over-blur local contrast. +const DEFAULT_CENTER_RETENTION: f32 = 0.82; +const INJECT_SHADER_PATH = "assets/shaders/vulkan/lpv_inject.comp.spv"; +const PROPAGATE_SHADER_PATH = "assets/shaders/vulkan/lpv_propagate.comp.spv"; + +const GpuLight = extern struct { + pos_radius: [4]f32, + color: [4]f32, +}; + +const InjectPush = extern struct { + grid_origin: [4]f32, + grid_params: [4]f32, + light_count: u32, + _pad0: [3]u32, +}; + +const PropagatePush = extern struct { + grid_size: u32, + _pad0: [3]u32, + propagation: [4]f32, +}; + +pub const LPVSystem = struct { + pub const Stats = struct { + updated_this_frame: bool = false, + light_count: u32 = 0, + cpu_update_ms: f32 = 0.0, + grid_size: u32 = 0, + propagation_iterations: u32 = 0, + update_interval_frames: u32 = 6, + }; + + allocator: std.mem.Allocator, + rhi: rhi_pkg.RHI, + vk_ctx: *VulkanContext, + + // SH L1: 3 textures per grid (R, G, B channels), each storing 4 SH coefficients as rgba32f + grid_textures_a: [3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + grid_textures_b: [3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + active_grid_textures: 
[3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + debug_overlay_texture: rhi_pkg.TextureHandle = 0, + grid_size: u32, + cell_size: f32, + intensity: f32, + propagation_iterations: u32, + propagation_factor: f32, + center_retention: f32, + enabled: bool, + update_interval_frames: u32 = 6, + + origin: Vec3 = Vec3.zero, + current_frame: u32 = 0, + was_enabled_last_frame: bool = true, + debug_overlay_was_enabled: bool = false, + + image_layout_a: c.VkImageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + image_layout_b: c.VkImageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + + debug_overlay_pixels: []f32, + stats: Stats, + + light_buffer: Utils.VulkanBuffer = .{}, + occlusion_buffer: Utils.VulkanBuffer = .{}, + occlusion_grid: []u32 = &.{}, + + descriptor_pool: c.VkDescriptorPool = null, + inject_set_layout: c.VkDescriptorSetLayout = null, + propagate_set_layout: c.VkDescriptorSetLayout = null, + inject_descriptor_set: c.VkDescriptorSet = null, + propagate_ab_descriptor_set: c.VkDescriptorSet = null, + propagate_ba_descriptor_set: c.VkDescriptorSet = null, + inject_pipeline_layout: c.VkPipelineLayout = null, + propagate_pipeline_layout: c.VkPipelineLayout = null, + inject_pipeline: c.VkPipeline = null, + propagate_pipeline: c.VkPipeline = null, + + const GridResources = struct { + grid_textures_a: [3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + grid_textures_b: [3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + active_grid_textures: [3]rhi_pkg.TextureHandle = .{ 0, 0, 0 }, + debug_overlay_texture: rhi_pkg.TextureHandle = 0, + debug_overlay_pixels: []f32 = &.{}, + image_layout_a: c.VkImageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + image_layout_b: c.VkImageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + }; + + pub fn init( + allocator: std.mem.Allocator, + rhi: rhi_pkg.RHI, + grid_size: u32, + cell_size: f32, + intensity: f32, + propagation_iterations: u32, + enabled: bool, + ) !*LPVSystem { + const self = try allocator.create(LPVSystem); + errdefer 
allocator.destroy(self); + + const vk_ctx: *VulkanContext = @ptrCast(@alignCast(rhi.ptr)); + const clamped_grid = std.math.clamp(grid_size, 16, 64); + + self.* = .{ + .allocator = allocator, + .rhi = rhi, + .vk_ctx = vk_ctx, + .grid_size = clamped_grid, + .cell_size = @max(cell_size, 0.5), + .intensity = std.math.clamp(intensity, 0.0, 4.0), + .propagation_iterations = std.math.clamp(propagation_iterations, 1, 8), + .propagation_factor = DEFAULT_PROPAGATION_FACTOR, + .center_retention = DEFAULT_CENTER_RETENTION, + .enabled = enabled, + .was_enabled_last_frame = enabled, + .debug_overlay_pixels = &.{}, + .stats = .{ + .grid_size = clamped_grid, + .propagation_iterations = std.math.clamp(propagation_iterations, 1, 8), + .update_interval_frames = 6, + }, + }; + + try self.createGridTextures(); + errdefer self.destroyGridTextures(); + + const light_buffer_size = MAX_LIGHTS_PER_UPDATE * @sizeOf(GpuLight); + self.light_buffer = try Utils.createVulkanBuffer( + &vk_ctx.vulkan_device, + light_buffer_size, + c.VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + c.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | c.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + ); + errdefer self.destroyLightBuffer(); + + // Occlusion grid buffer: one u32 per cell (1 = opaque, 0 = transparent) + const occlusion_buffer_size = @as(usize, clamped_grid) * @as(usize, clamped_grid) * @as(usize, clamped_grid) * @sizeOf(u32); + self.occlusion_buffer = try Utils.createVulkanBuffer( + &vk_ctx.vulkan_device, + occlusion_buffer_size, + c.VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + c.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | c.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + ); + errdefer self.destroyOcclusionBuffer(); + + try ensureShaderFileExists(INJECT_SHADER_PATH); + try ensureShaderFileExists(PROPAGATE_SHADER_PATH); + + errdefer self.deinitComputeResources(); + try self.initComputeResources(); + + return self; + } + + pub fn deinit(self: *LPVSystem) void { + self.deinitComputeResources(); + self.destroyOcclusionBuffer(); + self.destroyLightBuffer(); + 
self.destroyGridTextures(); + self.allocator.destroy(self); + } + + pub fn setSettings(self: *LPVSystem, enabled: bool, intensity: f32, cell_size: f32, propagation_iterations: u32, grid_size: u32, update_interval_frames: u32) !void { + self.enabled = enabled; + self.intensity = std.math.clamp(intensity, 0.0, 4.0); + self.cell_size = @max(cell_size, 0.5); + self.propagation_iterations = std.math.clamp(propagation_iterations, 1, 8); + self.update_interval_frames = std.math.clamp(update_interval_frames, 1, 16); + self.stats.propagation_iterations = self.propagation_iterations; + self.stats.update_interval_frames = self.update_interval_frames; + + const clamped_grid = std.math.clamp(grid_size, 16, 64); + if (clamped_grid == self.grid_size) return; + + const old_resources = GridResources{ + .grid_textures_a = self.grid_textures_a, + .grid_textures_b = self.grid_textures_b, + .active_grid_textures = self.active_grid_textures, + .debug_overlay_texture = self.debug_overlay_texture, + .debug_overlay_pixels = self.debug_overlay_pixels, + .image_layout_a = self.image_layout_a, + .image_layout_b = self.image_layout_b, + }; + const old_grid_size = self.grid_size; + const old_stats_grid_size = self.stats.grid_size; + const old_origin = self.origin; + + const new_resources = try self.createGridResources(clamped_grid); + self.applyGridResources(new_resources); + self.grid_size = clamped_grid; + self.stats.grid_size = clamped_grid; + self.origin = Vec3.zero; + + errdefer { + var failed_new = GridResources{ + .grid_textures_a = self.grid_textures_a, + .grid_textures_b = self.grid_textures_b, + .active_grid_textures = self.active_grid_textures, + .debug_overlay_texture = self.debug_overlay_texture, + .debug_overlay_pixels = self.debug_overlay_pixels, + .image_layout_a = self.image_layout_a, + .image_layout_b = self.image_layout_b, + }; + self.destroyGridResources(&failed_new); + self.applyGridResources(old_resources); + self.grid_size = old_grid_size; + self.stats.grid_size = 
old_stats_grid_size; + self.origin = old_origin; + } + + self.buildDebugOverlay(&.{}, 0); + try self.uploadDebugOverlay(); + try self.updateDescriptorSets(); + + var old_to_destroy = old_resources; + self.destroyGridResources(&old_to_destroy); + } + + pub fn getTextureHandle(self: *const LPVSystem) rhi_pkg.TextureHandle { + return self.active_grid_textures[0]; // R channel (binding 11) + } + + pub fn getTextureHandleG(self: *const LPVSystem) rhi_pkg.TextureHandle { + return self.active_grid_textures[1]; // G channel (binding 12) + } + + pub fn getTextureHandleB(self: *const LPVSystem) rhi_pkg.TextureHandle { + return self.active_grid_textures[2]; // B channel (binding 13) + } + + pub fn getDebugOverlayTextureHandle(self: *const LPVSystem) rhi_pkg.TextureHandle { + return self.debug_overlay_texture; + } + + pub fn getStats(self: *const LPVSystem) Stats { + return self.stats; + } + + pub fn getOrigin(self: *const LPVSystem) Vec3 { + return self.origin; + } + + pub fn getGridSize(self: *const LPVSystem) u32 { + return self.grid_size; + } + + pub fn getCellSize(self: *const LPVSystem) f32 { + return self.cell_size; + } + + pub fn update(self: *LPVSystem, world: *World, camera_pos: Vec3, debug_overlay_enabled: bool) !void { + self.current_frame +%= 1; + var timer = std.time.Timer.start() catch unreachable; + self.stats.updated_this_frame = false; + self.stats.grid_size = self.grid_size; + self.stats.propagation_iterations = self.propagation_iterations; + self.stats.update_interval_frames = self.update_interval_frames; + + if (!self.enabled) { + self.active_grid_textures = self.grid_textures_a; + if (self.was_enabled_last_frame and debug_overlay_enabled) { + self.buildDebugOverlay(&.{}, 0); + try self.uploadDebugOverlay(); + } + self.was_enabled_last_frame = false; + self.debug_overlay_was_enabled = debug_overlay_enabled; + self.stats.light_count = 0; + self.stats.cpu_update_ms = 0.0; + return; + } + + const half_extent = (@as(f32, @floatFromInt(self.grid_size)) * 
self.cell_size) * 0.5; + const next_origin = Vec3.init( + quantizeToCell(camera_pos.x - half_extent, self.cell_size), + quantizeToCell(camera_pos.y - half_extent, self.cell_size), + quantizeToCell(camera_pos.z - half_extent, self.cell_size), + ); + + const moved = @abs(next_origin.x - self.origin.x) >= self.cell_size or + @abs(next_origin.y - self.origin.y) >= self.cell_size or + @abs(next_origin.z - self.origin.z) >= self.cell_size; + + const tick_update = (self.current_frame % self.update_interval_frames) == 0; + const debug_toggle_on = debug_overlay_enabled and !self.debug_overlay_was_enabled; + self.debug_overlay_was_enabled = debug_overlay_enabled; + + if (!moved and !tick_update and !debug_toggle_on and self.was_enabled_last_frame) { + self.stats.cpu_update_ms = 0.0; + return; + } + + self.origin = next_origin; + self.was_enabled_last_frame = true; + + var lights: [MAX_LIGHTS_PER_UPDATE]GpuLight = undefined; + const light_count = self.collectLights(world, lights[0..]); + if (self.light_buffer.mapped_ptr) |ptr| { + const bytes = std.mem.sliceAsBytes(lights[0..light_count]); + @memcpy(@as([*]u8, @ptrCast(ptr))[0..bytes.len], bytes); + } + + // Build occlusion grid for opaque block awareness during propagation + if (!self.buildOcclusionGrid(world)) { + const elapsed_ns = timer.read(); + const delta_ms: f32 = @floatCast(@as(f64, @floatFromInt(elapsed_ns)) / @as(f64, std.time.ns_per_ms)); + self.stats.light_count = @intCast(light_count); + self.stats.cpu_update_ms = delta_ms; + return; + } + + if (debug_overlay_enabled) { + // Keep debug overlay generation only when overlay is active. 
+ self.buildDebugOverlay(lights[0..], light_count); + try self.uploadDebugOverlay(); + } + + try self.dispatchCompute(light_count); + + const elapsed_ns = timer.read(); + const delta_ms: f32 = @floatCast(@as(f64, @floatFromInt(elapsed_ns)) / @as(f64, std.time.ns_per_ms)); + self.stats.updated_this_frame = true; + self.stats.light_count = @intCast(light_count); + self.stats.cpu_update_ms = delta_ms; + } + + fn collectLights(self: *LPVSystem, world: *World, out: []GpuLight) usize { + const grid_world_size = @as(f32, @floatFromInt(self.grid_size)) * self.cell_size; + const min_x = self.origin.x; + const min_y = self.origin.y; + const min_z = self.origin.z; + const max_x = min_x + grid_world_size; + const max_y = min_y + grid_world_size; + const max_z = min_z + grid_world_size; + + var emitted_lights: usize = 0; + + world.storage.chunks_mutex.lockShared(); + defer world.storage.chunks_mutex.unlockShared(); + + var iter = world.storage.iteratorUnsafe(); + while (iter.next()) |entry| { + const chunk_data = entry.value_ptr.*; + const chunk = &chunk_data.chunk; + + const chunk_min_x = @as(f32, @floatFromInt(chunk.chunk_x * CHUNK_SIZE_X)); + const chunk_min_z = @as(f32, @floatFromInt(chunk.chunk_z * CHUNK_SIZE_Z)); + const chunk_max_x = chunk_min_x + @as(f32, @floatFromInt(CHUNK_SIZE_X)); + const chunk_max_z = chunk_min_z + @as(f32, @floatFromInt(CHUNK_SIZE_Z)); + + if (chunk_max_x < min_x or chunk_min_x > max_x or chunk_max_z < min_z or chunk_min_z > max_z) { + continue; + } + + var y: u32 = 0; + while (y < CHUNK_SIZE_Y) : (y += 1) { + var z: u32 = 0; + while (z < CHUNK_SIZE_Z) : (z += 1) { + var x: u32 = 0; + while (x < CHUNK_SIZE_X) : (x += 1) { + const block = chunk.getBlock(x, y, z); + if (block == .air) continue; + + const def = block_registry.getBlockDefinition(block); + const r_u4 = def.light_emission[0]; + const g_u4 = def.light_emission[1]; + const b_u4 = def.light_emission[2]; + if (r_u4 == 0 and g_u4 == 0 and b_u4 == 0) continue; + + const world_x = chunk_min_x 
+ @as(f32, @floatFromInt(x)) + 0.5; + const world_y = @as(f32, @floatFromInt(y)) + 0.5; + const world_z = chunk_min_z + @as(f32, @floatFromInt(z)) + 0.5; + if (world_x < min_x or world_x >= max_x or world_y < min_y or world_y >= max_y or world_z < min_z or world_z >= max_z) { + continue; + } + + const emission_r = @as(f32, @floatFromInt(r_u4)) / 15.0; + const emission_g = @as(f32, @floatFromInt(g_u4)) / 15.0; + const emission_b = @as(f32, @floatFromInt(b_u4)) / 15.0; + const max_emission = @max(emission_r, @max(emission_g, emission_b)); + const radius_cells = @max(1.0, max_emission * 6.0); + + out[emitted_lights] = .{ + .pos_radius = .{ world_x, world_y, world_z, radius_cells }, + .color = .{ emission_r, emission_g, emission_b, 1.0 }, + }; + + emitted_lights += 1; + if (emitted_lights >= out.len) return emitted_lights; + } + } + } + } + + return emitted_lights; + } + + /// Build a per-cell occlusion grid (1 = opaque, 0 = transparent) for the current LPV volume. + /// Stored as packed u32 array where each u32 holds the opacity for one cell. 
+ fn buildOcclusionGrid(self: *LPVSystem, world: *World) bool { + const gs = @as(usize, self.grid_size); + const total_cells = gs * gs * gs; + + // Ensure CPU buffer is allocated + if (self.occlusion_grid.len != total_cells) { + const new_grid = self.allocator.alloc(u32, total_cells) catch |err| { + std.log.err("LPV occlusion grid allocation failed ({} cells): {}", .{ total_cells, err }); + return false; + }; + if (self.occlusion_grid.len > 0) self.allocator.free(self.occlusion_grid); + self.occlusion_grid = new_grid; + } + + @memset(self.occlusion_grid, 0); + + world.storage.chunks_mutex.lockShared(); + defer world.storage.chunks_mutex.unlockShared(); + + const grid_world_size = @as(f32, @floatFromInt(self.grid_size)) * self.cell_size; + const min_x = self.origin.x; + const min_y = self.origin.y; + const min_z = self.origin.z; + const max_x = min_x + grid_world_size; + const max_z = min_z + grid_world_size; + + var iter = world.storage.iteratorUnsafe(); + while (iter.next()) |entry| { + const chunk_data = entry.value_ptr.*; + const chunk = &chunk_data.chunk; + + const chunk_min_x = @as(f32, @floatFromInt(chunk.chunk_x * CHUNK_SIZE_X)); + const chunk_min_z = @as(f32, @floatFromInt(chunk.chunk_z * CHUNK_SIZE_Z)); + const chunk_max_x = chunk_min_x + @as(f32, @floatFromInt(CHUNK_SIZE_X)); + const chunk_max_z = chunk_min_z + @as(f32, @floatFromInt(CHUNK_SIZE_Z)); + + if (chunk_max_x < min_x or chunk_min_x > max_x or chunk_max_z < min_z or chunk_min_z > max_z) { + continue; + } + + var y: u32 = 0; + while (y < CHUNK_SIZE_Y) : (y += 1) { + const world_y = @as(f32, @floatFromInt(y)) + 0.5; + if (world_y < min_y or world_y >= min_y + grid_world_size) continue; + + var z: u32 = 0; + while (z < CHUNK_SIZE_Z) : (z += 1) { + var x: u32 = 0; + while (x < CHUNK_SIZE_X) : (x += 1) { + const block = chunk.getBlock(x, y, z); + if (block == .air) continue; + + const def = block_registry.getBlockDefinition(block); + if (!def.isOpaque()) continue; + + const world_x = chunk_min_x + 
@as(f32, @floatFromInt(x)) + 0.5; + const world_z = chunk_min_z + @as(f32, @floatFromInt(z)) + 0.5; + + // Map world position to grid cell + const gx = @as(i32, @intFromFloat(@floor((world_x - self.origin.x) / self.cell_size))); + const gy = @as(i32, @intFromFloat(@floor((world_y - self.origin.y) / self.cell_size))); + const gz = @as(i32, @intFromFloat(@floor((world_z - self.origin.z) / self.cell_size))); + + if (gx < 0 or gy < 0 or gz < 0) continue; + const ugx = @as(usize, @intCast(gx)); + const ugy = @as(usize, @intCast(gy)); + const ugz = @as(usize, @intCast(gz)); + if (ugx >= gs or ugy >= gs or ugz >= gs) continue; + + const idx = ugx + ugy * gs + ugz * gs * gs; + self.occlusion_grid[idx] = 1; + } + } + } + } + + // Upload to GPU + if (self.occlusion_buffer.mapped_ptr) |ptr| { + const bytes = std.mem.sliceAsBytes(self.occlusion_grid); + @memcpy(@as([*]u8, @ptrCast(ptr))[0..bytes.len], bytes); + return true; + } + + std.log.err("LPV occlusion upload skipped: buffer is not mapped", .{}); + return false; + } + + fn createGridResources(self: *LPVSystem, grid_size: u32) !GridResources { + var resources = GridResources{}; + errdefer self.destroyGridResources(&resources); + + const empty = try self.allocator.alloc(f32, @as(usize, grid_size) * @as(usize, grid_size) * @as(usize, grid_size) * 4); + defer self.allocator.free(empty); + @memset(empty, 0.0); + const bytes = std.mem.sliceAsBytes(empty); + + const tex_config = rhi_pkg.TextureConfig{ + .min_filter = .linear, + .mag_filter = .linear, + .wrap_s = .clamp_to_edge, + .wrap_t = .clamp_to_edge, + .generate_mipmaps = false, + .is_render_target = false, + }; + + for (0..3) |ch| { + resources.grid_textures_a[ch] = try self.rhi.factory().createTexture3D( + grid_size, + grid_size, + grid_size, + .rgba32f, + tex_config, + bytes, + ); + + resources.grid_textures_b[ch] = try self.rhi.factory().createTexture3D( + grid_size, + grid_size, + grid_size, + .rgba32f, + tex_config, + bytes, + ); + } + + const debug_size = @as(usize, 
grid_size) * @as(usize, grid_size) * 4; + resources.debug_overlay_pixels = try self.allocator.alloc(f32, debug_size); + @memset(resources.debug_overlay_pixels, 0.0); + + resources.debug_overlay_texture = try self.rhi.createTexture( + grid_size, + grid_size, + .rgba32f, + .{ + .min_filter = .linear, + .mag_filter = .linear, + .wrap_s = .clamp_to_edge, + .wrap_t = .clamp_to_edge, + .generate_mipmaps = false, + .is_render_target = false, + }, + std.mem.sliceAsBytes(resources.debug_overlay_pixels), + ); + + resources.active_grid_textures = resources.grid_textures_a; + resources.image_layout_a = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + resources.image_layout_b = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + + return resources; + } + + fn applyGridResources(self: *LPVSystem, resources: GridResources) void { + self.grid_textures_a = resources.grid_textures_a; + self.grid_textures_b = resources.grid_textures_b; + self.active_grid_textures = resources.active_grid_textures; + self.debug_overlay_texture = resources.debug_overlay_texture; + self.debug_overlay_pixels = resources.debug_overlay_pixels; + self.image_layout_a = resources.image_layout_a; + self.image_layout_b = resources.image_layout_b; + } + + fn destroyGridResources(self: *LPVSystem, resources: *GridResources) void { + for (0..3) |ch| { + if (resources.grid_textures_a[ch] != 0) { + self.rhi.destroyTexture(resources.grid_textures_a[ch]); + resources.grid_textures_a[ch] = 0; + } + if (resources.grid_textures_b[ch] != 0) { + self.rhi.destroyTexture(resources.grid_textures_b[ch]); + resources.grid_textures_b[ch] = 0; + } + } + + if (resources.debug_overlay_texture != 0) { + self.rhi.destroyTexture(resources.debug_overlay_texture); + resources.debug_overlay_texture = 0; + } + if (resources.debug_overlay_pixels.len > 0) { + self.allocator.free(resources.debug_overlay_pixels); + resources.debug_overlay_pixels = &.{}; + } + + resources.active_grid_textures = .{ 0, 0, 0 }; + } + + fn dispatchCompute(self: *LPVSystem, 
light_count: usize) !void { + const cmd = self.vk_ctx.frames.command_buffers[self.vk_ctx.frames.current_frame]; + if (cmd == null) return; + + // Transition all 6 SH channel textures (3 per grid) to GENERAL for compute access + for (0..3) |ch| { + const tex_a = self.vk_ctx.resources.textures.get(self.grid_textures_a[ch]) orelse return; + const tex_b = self.vk_ctx.resources.textures.get(self.grid_textures_b[ch]) orelse return; + try self.transitionImage(cmd, tex_a.image.?, self.image_layout_a, c.VK_IMAGE_LAYOUT_GENERAL, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_ACCESS_SHADER_READ_BIT, c.VK_ACCESS_SHADER_READ_BIT | c.VK_ACCESS_SHADER_WRITE_BIT); + try self.transitionImage(cmd, tex_b.image.?, self.image_layout_b, c.VK_IMAGE_LAYOUT_GENERAL, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_ACCESS_SHADER_READ_BIT, c.VK_ACCESS_SHADER_READ_BIT | c.VK_ACCESS_SHADER_WRITE_BIT); + } + self.image_layout_a = c.VK_IMAGE_LAYOUT_GENERAL; + self.image_layout_b = c.VK_IMAGE_LAYOUT_GENERAL; + + // Ensure host writes to light buffer and occlusion grid are visible to compute shaders. + // Both buffers use HOST_COHERENT, but we still need an execution dependency to guarantee + // the memcpy completes before the GPU reads the SSBOs. 
+ var host_barrier = std.mem.zeroes(c.VkMemoryBarrier); + host_barrier.sType = c.VK_STRUCTURE_TYPE_MEMORY_BARRIER; + host_barrier.srcAccessMask = c.VK_ACCESS_HOST_WRITE_BIT; + host_barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + c.vkCmdPipelineBarrier(cmd, c.VK_PIPELINE_STAGE_HOST_BIT, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 1, &host_barrier, 0, null, 0, null); + + const groups = divCeil(self.grid_size, 4); + + const inject_push = InjectPush{ + .grid_origin = .{ self.origin.x, self.origin.y, self.origin.z, self.cell_size }, + .grid_params = .{ @floatFromInt(self.grid_size), 0.0, 0.0, 0.0 }, + .light_count = @intCast(light_count), + ._pad0 = .{ 0, 0, 0 }, + }; + + c.vkCmdBindPipeline(cmd, c.VK_PIPELINE_BIND_POINT_COMPUTE, self.inject_pipeline); + c.vkCmdBindDescriptorSets(cmd, c.VK_PIPELINE_BIND_POINT_COMPUTE, self.inject_pipeline_layout, 0, 1, &self.inject_descriptor_set, 0, null); + c.vkCmdPushConstants(cmd, self.inject_pipeline_layout, c.VK_SHADER_STAGE_COMPUTE_BIT, 0, @sizeOf(InjectPush), &inject_push); + c.vkCmdDispatch(cmd, groups, groups, groups); + + var mem_barrier = std.mem.zeroes(c.VkMemoryBarrier); + mem_barrier.sType = c.VK_STRUCTURE_TYPE_MEMORY_BARRIER; + mem_barrier.srcAccessMask = c.VK_ACCESS_SHADER_WRITE_BIT; + mem_barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT | c.VK_ACCESS_SHADER_WRITE_BIT; + c.vkCmdPipelineBarrier(cmd, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 1, &mem_barrier, 0, null, 0, null); + + c.vkCmdBindPipeline(cmd, c.VK_PIPELINE_BIND_POINT_COMPUTE, self.propagate_pipeline); + const prop_push = PropagatePush{ + .grid_size = self.grid_size, + ._pad0 = .{ 0, 0, 0 }, + .propagation = .{ self.propagation_factor, self.center_retention, 0, 0 }, + }; + + var use_ab = true; + var i: u32 = 0; + while (i < self.propagation_iterations) : (i += 1) { + const descriptor_set = if (use_ab) self.propagate_ab_descriptor_set else self.propagate_ba_descriptor_set; + c.vkCmdBindDescriptorSets(cmd, 
c.VK_PIPELINE_BIND_POINT_COMPUTE, self.propagate_pipeline_layout, 0, 1, &descriptor_set, 0, null); + c.vkCmdPushConstants(cmd, self.propagate_pipeline_layout, c.VK_SHADER_STAGE_COMPUTE_BIT, 0, @sizeOf(PropagatePush), &prop_push); + c.vkCmdDispatch(cmd, groups, groups, groups); + + c.vkCmdPipelineBarrier(cmd, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 1, &mem_barrier, 0, null, 0, null); + use_ab = !use_ab; + } + + // Transition final textures to SHADER_READ_ONLY for fragment shader sampling + const final_is_a = (self.propagation_iterations % 2) == 0; + const final_textures = if (final_is_a) &self.grid_textures_a else &self.grid_textures_b; + + for (0..3) |ch| { + const final_tex = self.vk_ctx.resources.textures.get(final_textures[ch]) orelse return; + try self.transitionImage(cmd, final_tex.image.?, c.VK_IMAGE_LAYOUT_GENERAL, c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, c.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, c.VK_ACCESS_SHADER_WRITE_BIT, c.VK_ACCESS_SHADER_READ_BIT); + } + + if (final_is_a) { + self.image_layout_a = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + self.active_grid_textures = self.grid_textures_a; + } else { + self.image_layout_b = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + self.active_grid_textures = self.grid_textures_b; + } + } + + fn transitionImage( + self: *LPVSystem, + cmd: c.VkCommandBuffer, + image: c.VkImage, + old_layout: c.VkImageLayout, + new_layout: c.VkImageLayout, + src_stage: c.VkPipelineStageFlags, + dst_stage: c.VkPipelineStageFlags, + src_access: c.VkAccessFlags, + dst_access: c.VkAccessFlags, + ) !void { + _ = self; + if (old_layout == new_layout) return; + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = old_layout; + barrier.newLayout = new_layout; + barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = 
c.VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = 1; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = 1; + barrier.srcAccessMask = src_access; + barrier.dstAccessMask = dst_access; + + c.vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, null, 0, null, 1, &barrier); + } + + fn createGridTextures(self: *LPVSystem) !void { + const resources = try self.createGridResources(self.grid_size); + self.applyGridResources(resources); + + self.buildDebugOverlay(&.{}, 0); + try self.uploadDebugOverlay(); + } + + fn destroyGridTextures(self: *LPVSystem) void { + for (0..3) |ch| { + if (self.grid_textures_a[ch] != 0) { + self.rhi.destroyTexture(self.grid_textures_a[ch]); + self.grid_textures_a[ch] = 0; + } + if (self.grid_textures_b[ch] != 0) { + self.rhi.destroyTexture(self.grid_textures_b[ch]); + self.grid_textures_b[ch] = 0; + } + } + if (self.debug_overlay_texture != 0) { + self.rhi.destroyTexture(self.debug_overlay_texture); + self.debug_overlay_texture = 0; + } + if (self.debug_overlay_pixels.len > 0) { + self.allocator.free(self.debug_overlay_pixels); + self.debug_overlay_pixels = &.{}; + } + self.active_grid_textures = .{ 0, 0, 0 }; + } + + fn buildDebugOverlay(self: *LPVSystem, lights: []const GpuLight, light_count: usize) void { + const gs = @as(usize, self.grid_size); + var y: usize = 0; + while (y < gs) : (y += 1) { + var x: usize = 0; + while (x < gs) : (x += 1) { + const idx = (y * gs + x) * 4; + const checker: f32 = if (((x / 4) + (y / 4)) % 2 == 0) @as(f32, 1.5) else @as(f32, 2.0); + self.debug_overlay_pixels[idx + 0] = checker; + self.debug_overlay_pixels[idx + 1] = checker; + self.debug_overlay_pixels[idx + 2] = checker; + self.debug_overlay_pixels[idx + 3] = 1.0; + + if (x == 0 or y == 0 or x + 1 == gs or y + 1 == gs) { + self.debug_overlay_pixels[idx + 0] = 4.0; + 
self.debug_overlay_pixels[idx + 1] = 4.0; + self.debug_overlay_pixels[idx + 2] = 4.0; + } + } + } + + for (lights[0..light_count]) |light| { + const cx: f32 = ((light.pos_radius[0] - self.origin.x) / self.cell_size); + const cz: f32 = ((light.pos_radius[2] - self.origin.z) / self.cell_size); + const radius = @max(light.pos_radius[3], 0.5); + + var ty: usize = 0; + while (ty < gs) : (ty += 1) { + var tx: usize = 0; + while (tx < gs) : (tx += 1) { + const dx = @as(f32, @floatFromInt(tx)) - cx; + const dz = @as(f32, @floatFromInt(ty)) - cz; + const dist = @sqrt(dx * dx + dz * dz); + if (dist > radius) continue; + + const falloff = std.math.pow(f32, 1.0 - (dist / radius), 2.0); + const idx = (ty * gs + tx) * 4; + self.debug_overlay_pixels[idx + 0] += light.color[0] * falloff * 6.0; + self.debug_overlay_pixels[idx + 1] += light.color[1] * falloff * 6.0; + self.debug_overlay_pixels[idx + 2] += light.color[2] * falloff * 6.0; + } + } + } + + for (0..gs * gs) |i| { + const idx = i * 4; + self.debug_overlay_pixels[idx + 0] = toneMap(self.debug_overlay_pixels[idx + 0]); + self.debug_overlay_pixels[idx + 1] = toneMap(self.debug_overlay_pixels[idx + 1]); + self.debug_overlay_pixels[idx + 2] = toneMap(self.debug_overlay_pixels[idx + 2]); + } + } + + fn uploadDebugOverlay(self: *LPVSystem) !void { + if (self.debug_overlay_texture == 0 or self.debug_overlay_pixels.len == 0) return; + try self.rhi.updateTexture(self.debug_overlay_texture, std.mem.sliceAsBytes(self.debug_overlay_pixels)); + } + + fn destroyLightBuffer(self: *LPVSystem) void { + if (self.light_buffer.buffer != null) { + if (self.light_buffer.memory == null) { + std.log.warn("LPV light buffer has VkBuffer but null VkDeviceMemory during teardown", .{}); + } + if (self.light_buffer.mapped_ptr != null) { + c.vkUnmapMemory(self.vk_ctx.vulkan_device.vk_device, self.light_buffer.memory); + self.light_buffer.mapped_ptr = null; + } + c.vkDestroyBuffer(self.vk_ctx.vulkan_device.vk_device, self.light_buffer.buffer, null); + 
c.vkFreeMemory(self.vk_ctx.vulkan_device.vk_device, self.light_buffer.memory, null); + self.light_buffer = .{}; + } + } + + fn destroyOcclusionBuffer(self: *LPVSystem) void { + if (self.occlusion_buffer.buffer != null) { + if (self.occlusion_buffer.mapped_ptr != null) { + c.vkUnmapMemory(self.vk_ctx.vulkan_device.vk_device, self.occlusion_buffer.memory); + self.occlusion_buffer.mapped_ptr = null; + } + c.vkDestroyBuffer(self.vk_ctx.vulkan_device.vk_device, self.occlusion_buffer.buffer, null); + if (self.occlusion_buffer.memory != null) { + c.vkFreeMemory(self.vk_ctx.vulkan_device.vk_device, self.occlusion_buffer.memory, null); + } + self.occlusion_buffer = .{}; + } + if (self.occlusion_grid.len > 0) { + self.allocator.free(self.occlusion_grid); + self.occlusion_grid = &.{}; + } + } + + fn initComputeResources(self: *LPVSystem) !void { + const vk = self.vk_ctx.vulkan_device.vk_device; + + // SH L1: inject needs 3 output images + 1 SSBO = 4 bindings + // propagate needs 3 src + 3 dst images + 1 occlusion SSBO = 7 bindings + // Total images: inject(3) + prop_ab(6) + prop_ba(6) = 15 + // Total buffers: inject(1) + prop_ab(1) + prop_ba(1) = 3 + var pool_sizes = [_]c.VkDescriptorPoolSize{ + .{ .type = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 16 }, + .{ .type = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 4 }, + }; + + var pool_info = std.mem.zeroes(c.VkDescriptorPoolCreateInfo); + pool_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + pool_info.maxSets = 4; + pool_info.poolSizeCount = pool_sizes.len; + pool_info.pPoolSizes = &pool_sizes; + try Utils.checkVk(c.vkCreateDescriptorPool(vk, &pool_info, null, &self.descriptor_pool)); + + // Inject: binding 0,1,2 = output images (R,G,B SH channels), binding 3 = light buffer + const inject_bindings = [_]c.VkDescriptorSetLayoutBinding{ + .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, 
.pImmutableSamplers = null }, + .{ .binding = 1, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 2, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 3, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + }; + var inject_layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); + inject_layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + inject_layout_info.bindingCount = inject_bindings.len; + inject_layout_info.pBindings = &inject_bindings; + try Utils.checkVk(c.vkCreateDescriptorSetLayout(vk, &inject_layout_info, null, &self.inject_set_layout)); + + // Propagate: binding 0-2 = src (R,G,B), binding 3-5 = dst (R,G,B), binding 6 = occlusion + const prop_bindings = [_]c.VkDescriptorSetLayoutBinding{ + .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 1, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 2, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 3, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 4, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 5, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 
.descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + .{ .binding = 6, .descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT, .pImmutableSamplers = null }, + }; + var prop_layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); + prop_layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + prop_layout_info.bindingCount = prop_bindings.len; + prop_layout_info.pBindings = &prop_bindings; + try Utils.checkVk(c.vkCreateDescriptorSetLayout(vk, &prop_layout_info, null, &self.propagate_set_layout)); + + try self.allocateDescriptorSets(); + try self.updateDescriptorSets(); + + try self.createComputePipelines(); + } + + fn allocateDescriptorSets(self: *LPVSystem) !void { + const vk = self.vk_ctx.vulkan_device.vk_device; + + var inject_alloc = std.mem.zeroes(c.VkDescriptorSetAllocateInfo); + inject_alloc.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + inject_alloc.descriptorPool = self.descriptor_pool; + inject_alloc.descriptorSetCount = 1; + inject_alloc.pSetLayouts = &self.inject_set_layout; + try Utils.checkVk(c.vkAllocateDescriptorSets(vk, &inject_alloc, &self.inject_descriptor_set)); + + const layouts = [_]c.VkDescriptorSetLayout{ self.propagate_set_layout, self.propagate_set_layout }; + var prop_alloc = std.mem.zeroes(c.VkDescriptorSetAllocateInfo); + prop_alloc.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + prop_alloc.descriptorPool = self.descriptor_pool; + prop_alloc.descriptorSetCount = 2; + prop_alloc.pSetLayouts = &layouts; + var prop_sets: [2]c.VkDescriptorSet = .{ null, null }; + try Utils.checkVk(c.vkAllocateDescriptorSets(vk, &prop_alloc, &prop_sets)); + self.propagate_ab_descriptor_set = prop_sets[0]; + self.propagate_ba_descriptor_set = prop_sets[1]; + } + + fn updateDescriptorSets(self: *LPVSystem) !void { + // Resolve all 6 texture resources (3 channels x 2 grids) + var imgs_a: 
[3]c.VkDescriptorImageInfo = undefined; + var imgs_b: [3]c.VkDescriptorImageInfo = undefined; + for (0..3) |ch| { + const tex_a = self.vk_ctx.resources.textures.get(self.grid_textures_a[ch]) orelse return error.ResourceNotFound; + const tex_b = self.vk_ctx.resources.textures.get(self.grid_textures_b[ch]) orelse return error.ResourceNotFound; + imgs_a[ch] = c.VkDescriptorImageInfo{ .sampler = null, .imageView = tex_a.view, .imageLayout = c.VK_IMAGE_LAYOUT_GENERAL }; + imgs_b[ch] = c.VkDescriptorImageInfo{ .sampler = null, .imageView = tex_b.view, .imageLayout = c.VK_IMAGE_LAYOUT_GENERAL }; + } + var light_info = c.VkDescriptorBufferInfo{ .buffer = self.light_buffer.buffer, .offset = 0, .range = @sizeOf(GpuLight) * MAX_LIGHTS_PER_UPDATE }; + const occlusion_size = @as(usize, self.grid_size) * @as(usize, self.grid_size) * @as(usize, self.grid_size) * @sizeOf(u32); + var occlusion_info = c.VkDescriptorBufferInfo{ .buffer = self.occlusion_buffer.buffer, .offset = 0, .range = @intCast(occlusion_size) }; + + // Inject: bindings 0,1,2 = output R,G,B images (grid A), binding 3 = light buffer + // Propagate A->B: bindings 0-2 = src (A), bindings 3-5 = dst (B), binding 6 = occlusion + // Propagate B->A: bindings 0-2 = src (B), bindings 3-5 = dst (A), binding 6 = occlusion + // Total writes: 4 (inject) + 7 (prop_ab) + 7 (prop_ba) = 18 + var writes: [18]c.VkWriteDescriptorSet = undefined; + var n: usize = 0; + + // --- Inject set --- + for (0..3) |ch| { + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.inject_descriptor_set; + writes[n].dstBinding = @intCast(ch); + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; + writes[n].descriptorCount = 1; + writes[n].pImageInfo = &imgs_a[ch]; + n += 1; + } + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.inject_descriptor_set; + 
writes[n].dstBinding = 3; + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + writes[n].descriptorCount = 1; + writes[n].pBufferInfo = &light_info; + n += 1; + + // --- Propagate A->B set --- + for (0..3) |ch| { + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ab_descriptor_set; + writes[n].dstBinding = @intCast(ch); + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; + writes[n].descriptorCount = 1; + writes[n].pImageInfo = &imgs_a[ch]; + n += 1; + } + for (0..3) |ch| { + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ab_descriptor_set; + writes[n].dstBinding = @intCast(ch + 3); + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; + writes[n].descriptorCount = 1; + writes[n].pImageInfo = &imgs_b[ch]; + n += 1; + } + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ab_descriptor_set; + writes[n].dstBinding = 6; + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + writes[n].descriptorCount = 1; + writes[n].pBufferInfo = &occlusion_info; + n += 1; + + // --- Propagate B->A set --- + for (0..3) |ch| { + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ba_descriptor_set; + writes[n].dstBinding = @intCast(ch); + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; + writes[n].descriptorCount = 1; + writes[n].pImageInfo = &imgs_b[ch]; + n += 1; + } + for (0..3) |ch| { + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ba_descriptor_set; + writes[n].dstBinding = @intCast(ch + 3); + writes[n].descriptorType = 
c.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; + writes[n].descriptorCount = 1; + writes[n].pImageInfo = &imgs_a[ch]; + n += 1; + } + writes[n] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[n].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[n].dstSet = self.propagate_ba_descriptor_set; + writes[n].dstBinding = 6; + writes[n].descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + writes[n].descriptorCount = 1; + writes[n].pBufferInfo = &occlusion_info; + n += 1; + + c.vkUpdateDescriptorSets(self.vk_ctx.vulkan_device.vk_device, @intCast(n), &writes[0], 0, null); + } + + fn createComputePipelines(self: *LPVSystem) !void { + const vk = self.vk_ctx.vulkan_device.vk_device; + + const inject_module = try createShaderModule(vk, INJECT_SHADER_PATH, self.allocator); + defer c.vkDestroyShaderModule(vk, inject_module, null); + const propagate_module = try createShaderModule(vk, PROPAGATE_SHADER_PATH, self.allocator); + defer c.vkDestroyShaderModule(vk, propagate_module, null); + + var inject_pc = std.mem.zeroes(c.VkPushConstantRange); + inject_pc.stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT; + inject_pc.offset = 0; + inject_pc.size = @sizeOf(InjectPush); + + var inject_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + inject_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + inject_layout_info.setLayoutCount = 1; + inject_layout_info.pSetLayouts = &self.inject_set_layout; + inject_layout_info.pushConstantRangeCount = 1; + inject_layout_info.pPushConstantRanges = &inject_pc; + try Utils.checkVk(c.vkCreatePipelineLayout(vk, &inject_layout_info, null, &self.inject_pipeline_layout)); + + var prop_pc = std.mem.zeroes(c.VkPushConstantRange); + prop_pc.stageFlags = c.VK_SHADER_STAGE_COMPUTE_BIT; + prop_pc.offset = 0; + prop_pc.size = @sizeOf(PropagatePush); + + var prop_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + prop_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + prop_layout_info.setLayoutCount = 1; + 
prop_layout_info.pSetLayouts = &self.propagate_set_layout; + prop_layout_info.pushConstantRangeCount = 1; + prop_layout_info.pPushConstantRanges = &prop_pc; + try Utils.checkVk(c.vkCreatePipelineLayout(vk, &prop_layout_info, null, &self.propagate_pipeline_layout)); + + var inject_stage = std.mem.zeroes(c.VkPipelineShaderStageCreateInfo); + inject_stage.sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + inject_stage.stage = c.VK_SHADER_STAGE_COMPUTE_BIT; + inject_stage.module = inject_module; + inject_stage.pName = "main"; + + var inject_info = std.mem.zeroes(c.VkComputePipelineCreateInfo); + inject_info.sType = c.VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; + inject_info.stage = inject_stage; + inject_info.layout = self.inject_pipeline_layout; + try Utils.checkVk(c.vkCreateComputePipelines(vk, null, 1, &inject_info, null, &self.inject_pipeline)); + + var prop_stage = std.mem.zeroes(c.VkPipelineShaderStageCreateInfo); + prop_stage.sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + prop_stage.stage = c.VK_SHADER_STAGE_COMPUTE_BIT; + prop_stage.module = propagate_module; + prop_stage.pName = "main"; + + var prop_info = std.mem.zeroes(c.VkComputePipelineCreateInfo); + prop_info.sType = c.VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; + prop_info.stage = prop_stage; + prop_info.layout = self.propagate_pipeline_layout; + try Utils.checkVk(c.vkCreateComputePipelines(vk, null, 1, &prop_info, null, &self.propagate_pipeline)); + } + + fn deinitComputeResources(self: *LPVSystem) void { + const vk = self.vk_ctx.vulkan_device.vk_device; + if (self.inject_pipeline != null) c.vkDestroyPipeline(vk, self.inject_pipeline, null); + if (self.propagate_pipeline != null) c.vkDestroyPipeline(vk, self.propagate_pipeline, null); + if (self.inject_pipeline_layout != null) c.vkDestroyPipelineLayout(vk, self.inject_pipeline_layout, null); + if (self.propagate_pipeline_layout != null) c.vkDestroyPipelineLayout(vk, self.propagate_pipeline_layout, null); + if 
(self.inject_set_layout != null) c.vkDestroyDescriptorSetLayout(vk, self.inject_set_layout, null); + if (self.propagate_set_layout != null) c.vkDestroyDescriptorSetLayout(vk, self.propagate_set_layout, null); + if (self.descriptor_pool != null) c.vkDestroyDescriptorPool(vk, self.descriptor_pool, null); + + self.inject_pipeline = null; + self.propagate_pipeline = null; + self.inject_pipeline_layout = null; + self.propagate_pipeline_layout = null; + self.inject_set_layout = null; + self.propagate_set_layout = null; + self.descriptor_pool = null; + self.inject_descriptor_set = null; + self.propagate_ab_descriptor_set = null; + self.propagate_ba_descriptor_set = null; + } +}; + +fn quantizeToCell(value: f32, cell_size: f32) f32 { + return @floor(value / cell_size) * cell_size; +} + +fn divCeil(v: u32, d: u32) u32 { + return @divFloor(v + d - 1, d); +} + +fn toneMap(v: f32) f32 { + const x = @max(v, 0.0); + return x / (1.0 + x); +} + +fn createShaderModule(vk: c.VkDevice, path: []const u8, allocator: std.mem.Allocator) !c.VkShaderModule { + const bytes = try std.fs.cwd().readFileAlloc(path, allocator, @enumFromInt(16 * 1024 * 1024)); + defer allocator.free(bytes); + if (bytes.len % 4 != 0) return error.InvalidState; + + var info = std.mem.zeroes(c.VkShaderModuleCreateInfo); + info.sType = c.VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + info.codeSize = bytes.len; + info.pCode = @ptrCast(@alignCast(bytes.ptr)); + + var module: c.VkShaderModule = null; + try Utils.checkVk(c.vkCreateShaderModule(vk, &info, null, &module)); + return module; +} + +fn ensureShaderFileExists(path: []const u8) !void { + std.fs.cwd().access(path, .{}) catch |err| { + std.log.err("LPV shader artifact missing: {s} ({})", .{ path, err }); + std.log.err("Run `nix develop --command zig build` to regenerate Vulkan SPIR-V shaders.", .{}); + return err; + }; +} diff --git a/src/engine/graphics/render_graph.zig b/src/engine/graphics/render_graph.zig index e319f93c..216b10fb 100644 --- 
a/src/engine/graphics/render_graph.zig +++ b/src/engine/graphics/render_graph.zig @@ -30,11 +30,17 @@ pub const SceneContext = struct { disable_gpass_draw: bool, disable_ssao: bool, disable_clouds: bool, - // Phase 3: FXAA and Bloom flags + // Post-processing flags fxaa_enabled: bool = true, bloom_enabled: bool = true, overlay_renderer: ?*const fn (ctx: SceneContext) void = null, overlay_ctx: ?*anyopaque = null, + lpv_texture_handle: rhi_pkg.TextureHandle = 0, + lpv_texture_handle_g: rhi_pkg.TextureHandle = 0, + lpv_texture_handle_b: rhi_pkg.TextureHandle = 0, + // Pointer to frame-local cascade storage, computed once per frame by the first + // ShadowPass and reused by subsequent cascade passes to guarantee consistency. + cached_cascades: *?CSM.ShadowCascades, }; pub const IRenderPass = struct { @@ -113,7 +119,7 @@ pub const RenderGraph = struct { // --- Standard Pass Implementations --- -const SHADOW_PASS_NAMES = [_][]const u8{ "ShadowPass0", "ShadowPass1", "ShadowPass2" }; +const SHADOW_PASS_NAMES = [_][]const u8{ "ShadowPass0", "ShadowPass1", "ShadowPass2", "ShadowPass3" }; pub const ShadowPass = struct { cascade_index: u32, @@ -126,6 +132,7 @@ pub const ShadowPass = struct { .{ .name = "ShadowPass0", .needs_main_pass = false, .execute = execute }, .{ .name = "ShadowPass1", .needs_main_pass = false, .execute = execute }, .{ .name = "ShadowPass2", .needs_main_pass = false, .execute = execute }, + .{ .name = "ShadowPass3", .needs_main_pass = false, .execute = execute }, }; pub fn pass(self: *ShadowPass) IRenderPass { @@ -143,27 +150,43 @@ pub const ShadowPass = struct { const cascade_idx = self.cascade_index; const rhi = ctx.rhi; - const cascades = CSM.computeCascades( - ctx.shadow.resolution, - ctx.camera.fov, - ctx.aspect, - 0.1, - ctx.shadow.distance, - ctx.sky_params.sun_dir, - ctx.camera.getViewMatrixOriginCentered(), - true, - ); + // Compute cascades once per frame and cache via shared pointer so all + // cascade passes within the same frame use identical 
matrices. + const cascades = if (ctx.cached_cascades.*) |cached| cached else blk: { + const computed = CSM.computeCascades( + ctx.shadow.resolution, + ctx.camera.fov, + ctx.aspect, + 0.1, + ctx.shadow.distance, + ctx.sky_params.sun_dir, + ctx.camera.getViewMatrixOriginCentered(), + true, + ); + // Validate cascade data before using + if (!CSM.validateCascades(computed, log.log)) { + log.log.err("ShadowPass{}: Invalid cascade data, skipping shadow pass", .{cascade_idx}); + return error.InvalidShadowCascades; + } + ctx.cached_cascades.* = computed; + break :blk computed; + }; + const light_space_matrix = cascades.light_space_matrices[cascade_idx]; - try rhi.updateShadowUniforms(.{ - .light_space_matrices = cascades.light_space_matrices, - .cascade_splits = cascades.cascade_splits, - .shadow_texel_sizes = cascades.texel_sizes, - }); + // Only update uniforms on first cascade pass + if (cascade_idx == 0) { + try rhi.updateShadowUniforms(.{ + .light_space_matrices = cascades.light_space_matrices, + .cascade_splits = cascades.cascade_splits, + .shadow_texel_sizes = cascades.texel_sizes, + }); + } if (ctx.disable_shadow_draw) return; rhi.beginShadowPass(cascade_idx, light_space_matrix); + errdefer rhi.endShadowPass(); ctx.shadow_scene.renderShadowPass(light_space_matrix, ctx.camera.position); rhi.endShadowPass(); } @@ -262,6 +285,9 @@ pub const OpaquePass = struct { const rhi = ctx.rhi; rhi.bindShader(ctx.main_shader); ctx.material_system.bindTerrainMaterial(ctx.env_map_handle); + rhi.bindTexture(ctx.lpv_texture_handle, 11); + rhi.bindTexture(ctx.lpv_texture_handle_g, 12); + rhi.bindTexture(ctx.lpv_texture_handle_b, 13); const view_proj = Mat4.perspectiveReverseZ(ctx.camera.fov, ctx.aspect, ctx.camera.near, ctx.camera.far).multiply(ctx.camera.getViewMatrixOriginCentered()); ctx.world.render(view_proj, ctx.camera.position, true); } @@ -338,7 +364,7 @@ pub const PostProcessPass = struct { } }; -// Phase 3: Bloom Pass - Computes bloom mip chain from HDR buffer +// Bloom pass 
- computes bloom mip chain from HDR buffer pub const BloomPass = struct { enabled: bool = true, const VTABLE = IRenderPass.VTable{ @@ -360,7 +386,7 @@ pub const BloomPass = struct { } }; -// Phase 3: FXAA Pass - Applies FXAA to LDR output +// FXAA pass - applies anti-aliasing to LDR output pub const FXAAPass = struct { enabled: bool = true, const VTABLE = IRenderPass.VTable{ diff --git a/src/engine/graphics/resource_pack.zig b/src/engine/graphics/resource_pack.zig index f96d96b4..790c930e 100644 --- a/src/engine/graphics/resource_pack.zig +++ b/src/engine/graphics/resource_pack.zig @@ -89,6 +89,8 @@ pub const BLOCK_TEXTURES = [_]TextureMapping{ .{ .name = "flower_red", .files = &.{ "flower_red.png", "flower_rose.png", "poppy.png" } }, .{ .name = "flower_yellow", .files = &.{ "flower_yellow.png", "flower_dandelion.png", "dandelion.png" } }, .{ .name = "dead_bush", .files = &.{ "dead_bush.png", "deadbush.png" } }, + .{ .name = "torch", .files = &.{ "torch.png", "torch_on.png" } }, + .{ .name = "lava", .files = &.{ "lava.png", "lava_still.png" } }, }; pub const LoadedTexture = struct { diff --git a/src/engine/graphics/rhi.zig b/src/engine/graphics/rhi.zig index 482a434e..9fc1ea84 100644 --- a/src/engine/graphics/rhi.zig +++ b/src/engine/graphics/rhi.zig @@ -52,6 +52,7 @@ pub const IResourceFactory = struct { updateBuffer: *const fn (ptr: *anyopaque, handle: BufferHandle, offset: usize, data: []const u8) RhiError!void, destroyBuffer: *const fn (ptr: *anyopaque, handle: BufferHandle) void, createTexture: *const fn (ptr: *anyopaque, width: u32, height: u32, format: TextureFormat, config: TextureConfig, data: ?[]const u8) RhiError!TextureHandle, + createTexture3D: *const fn (ptr: *anyopaque, width: u32, height: u32, depth: u32, format: TextureFormat, config: TextureConfig, data: ?[]const u8) RhiError!TextureHandle, destroyTexture: *const fn (ptr: *anyopaque, handle: TextureHandle) void, updateTexture: *const fn (ptr: *anyopaque, handle: TextureHandle, data: []const u8) 
RhiError!void, createShader: *const fn (ptr: *anyopaque, vertex_src: [*c]const u8, fragment_src: [*c]const u8) RhiError!ShaderHandle, @@ -75,6 +76,9 @@ pub const IResourceFactory = struct { pub fn createTexture(self: IResourceFactory, width: u32, height: u32, format: TextureFormat, config: TextureConfig, data: ?[]const u8) RhiError!TextureHandle { return self.vtable.createTexture(self.ptr, width, height, format, config, data); } + pub fn createTexture3D(self: IResourceFactory, width: u32, height: u32, depth: u32, format: TextureFormat, config: TextureConfig, data: ?[]const u8) RhiError!TextureHandle { + return self.vtable.createTexture3D(self.ptr, width, height, depth, format, config, data); + } pub fn destroyTexture(self: IResourceFactory, handle: TextureHandle) void { self.vtable.destroyTexture(self.ptr, handle); } @@ -277,10 +281,10 @@ pub const IRenderContext = struct { endPostProcessPass: *const fn (ptr: *anyopaque) void, beginGPass: *const fn (ptr: *anyopaque) void, endGPass: *const fn (ptr: *anyopaque) void, - // FXAA Pass (Phase 3) + // FXAA pass beginFXAAPass: *const fn (ptr: *anyopaque) void, endFXAAPass: *const fn (ptr: *anyopaque) void, - // Bloom Pass (Phase 3) + // Bloom pass computeBloom: *const fn (ptr: *anyopaque) void, getEncoder: *const fn (ptr: *anyopaque) IGraphicsCommandEncoder, getStateContext: *const fn (ptr: *anyopaque) IRenderStateContext, @@ -468,10 +472,16 @@ pub const RHI = struct { setVolumetricDensity: *const fn (ctx: *anyopaque, density: f32) void, setMSAA: *const fn (ctx: *anyopaque, samples: u8) void, recover: *const fn (ctx: *anyopaque) anyerror!void, - // Phase 3: FXAA and Bloom options + // Post-processing options setFXAA: *const fn (ctx: *anyopaque, enabled: bool) void, setBloom: *const fn (ctx: *anyopaque, enabled: bool) void, setBloomIntensity: *const fn (ctx: *anyopaque, intensity: f32) void, + setVignetteEnabled: *const fn (ctx: *anyopaque, enabled: bool) void, + setVignetteIntensity: *const fn (ctx: *anyopaque, intensity: 
f32) void, + setFilmGrainEnabled: *const fn (ctx: *anyopaque, enabled: bool) void, + setFilmGrainIntensity: *const fn (ctx: *anyopaque, intensity: f32) void, + setColorGradingEnabled: *const fn (ctx: *anyopaque, enabled: bool) void, + setColorGradingIntensity: *const fn (ctx: *anyopaque, intensity: f32) void, }; pub fn factory(self: RHI) IResourceFactory { @@ -696,7 +706,7 @@ pub const RHI = struct { pub fn bindUIPipeline(self: RHI, textured: bool) void { self.vtable.ui.bindPipeline(self.ptr, textured); } - // Phase 3: FXAA and Bloom controls + // Post-processing controls pub fn setFXAA(self: RHI, enabled: bool) void { self.vtable.setFXAA(self.ptr, enabled); } @@ -706,4 +716,22 @@ pub const RHI = struct { pub fn setBloomIntensity(self: RHI, intensity: f32) void { self.vtable.setBloomIntensity(self.ptr, intensity); } + pub fn setVignetteEnabled(self: RHI, enabled: bool) void { + self.vtable.setVignetteEnabled(self.ptr, enabled); + } + pub fn setVignetteIntensity(self: RHI, intensity: f32) void { + self.vtable.setVignetteIntensity(self.ptr, intensity); + } + pub fn setFilmGrainEnabled(self: RHI, enabled: bool) void { + self.vtable.setFilmGrainEnabled(self.ptr, enabled); + } + pub fn setFilmGrainIntensity(self: RHI, intensity: f32) void { + self.vtable.setFilmGrainIntensity(self.ptr, intensity); + } + pub fn setColorGradingEnabled(self: RHI, enabled: bool) void { + self.vtable.setColorGradingEnabled(self.ptr, enabled); + } + pub fn setColorGradingIntensity(self: RHI, intensity: f32) void { + self.vtable.setColorGradingIntensity(self.ptr, intensity); + } }; diff --git a/src/engine/graphics/rhi_tests.zig b/src/engine/graphics/rhi_tests.zig index ea52b812..a497199a 100644 --- a/src/engine/graphics/rhi_tests.zig +++ b/src/engine/graphics/rhi_tests.zig @@ -203,6 +203,7 @@ const MockContext = struct { .updateBuffer = updateBuffer, .destroyBuffer = destroyBuffer, .createTexture = createTexture, + .createTexture3D = createTexture3D, .destroyTexture = destroyTexture, 
.updateTexture = updateTexture, .createShader = createShader, @@ -241,6 +242,16 @@ const MockContext = struct { _ = data; return 1; } + fn createTexture3D(ptr: *anyopaque, width: u32, height: u32, depth: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data: ?[]const u8) rhi.RhiError!rhi.TextureHandle { + _ = ptr; + _ = width; + _ = height; + _ = depth; + _ = format; + _ = config; + _ = data; + return 1; + } fn destroyTexture(ptr: *anyopaque, handle: rhi.TextureHandle) void { _ = ptr; _ = handle; @@ -323,6 +334,12 @@ const MockContext = struct { .setFXAA = undefined, .setBloom = undefined, .setBloomIntensity = undefined, + .setVignetteEnabled = undefined, + .setVignetteIntensity = undefined, + .setFilmGrainEnabled = undefined, + .setFilmGrainIntensity = undefined, + .setColorGradingEnabled = undefined, + .setColorGradingIntensity = undefined, }; const MOCK_ENCODER_VTABLE = rhi.IGraphicsCommandEncoder.VTable{ diff --git a/src/engine/graphics/rhi_types.zig b/src/engine/graphics/rhi_types.zig index afed550f..2031a880 100644 --- a/src/engine/graphics/rhi_types.zig +++ b/src/engine/graphics/rhi_types.zig @@ -36,8 +36,8 @@ pub const InvalidTextureHandle: TextureHandle = 0; pub const MAX_FRAMES_IN_FLIGHT = 2; /// Number of cascaded shadow map splits. -/// 3 cascades provide a good balance between quality (near detail) and performance (draw calls). -pub const SHADOW_CASCADE_COUNT = 3; +/// 4 cascades provide smoother transitions for large shadow distances (1000+) while maintaining quality. 
+pub const SHADOW_CASCADE_COUNT = 4; pub const BufferUsage = enum { vertex, @@ -166,12 +166,15 @@ pub const ShadowConfig = struct { resolution: u32 = 4096, pcf_samples: u8 = 12, cascade_blend: bool = true, + strength: f32 = 0.35, // Cloud shadow intensity (0-1) + light_size: f32 = 3.0, // PCSS light source size (world units) - controls penumbra softness }; pub const ShadowParams = struct { light_space_matrices: [SHADOW_CASCADE_COUNT]Mat4, cascade_splits: [SHADOW_CASCADE_COUNT]f32, shadow_texel_sizes: [SHADOW_CASCADE_COUNT]f32, + light_size: f32 = 3.0, // PCSS light source size for penumbra estimation }; pub const CloudParams = struct { @@ -198,6 +201,11 @@ pub const CloudParams = struct { exposure: f32 = 0.9, saturation: f32 = 1.3, ssao_enabled: bool = true, + lpv_enabled: bool = true, + lpv_intensity: f32 = 0.5, + lpv_cell_size: f32 = 2.0, + lpv_grid_size: u32 = 32, + lpv_origin: Vec3 = Vec3.init(0.0, 0.0, 0.0), }; pub const Color = struct { @@ -232,6 +240,7 @@ pub const GpuTimingResults = struct { shadow_pass_ms: [SHADOW_CASCADE_COUNT]f32, g_pass_ms: f32, ssao_pass_ms: f32, + lpv_pass_ms: f32, sky_pass_ms: f32, opaque_pass_ms: f32, cloud_pass_ms: f32, diff --git a/src/engine/graphics/rhi_vulkan.zig b/src/engine/graphics/rhi_vulkan.zig index 12414a1e..5a6855c1 100644 --- a/src/engine/graphics/rhi_vulkan.zig +++ b/src/engine/graphics/rhi_vulkan.zig @@ -1,2496 +1,34 @@ -//! Vulkan Rendering Hardware Interface (RHI) Backend -//! -//! This module implements the RHI interface for Vulkan, providing GPU abstraction. -//! -//! ## Robustness & Safety -//! The backend implements a Guarded Submission model to handle GPU hangs gracefully. -//! Every queue submission is wrapped in `submitGuarded()`, which detects `VK_ERROR_DEVICE_LOST` -//! and initiates a safe teardown or recovery path. -//! -//! Out-of-bounds GPU memory accesses are handled via `VK_EXT_robustness2`, which -//! ensures that such operations return safe values (zeros) rather than crashing -//! the system. 
Detailed fault information is logged using `VK_EXT_device_fault`. -//! -//! ## Recovery -//! When a GPU fault is detected, the `gpu_fault_detected` flag is set. The engine -//! attempts to stop further submissions and should ideally trigger a device recreation. -//! Currently, the engine logs the fault and requires an application restart for full recovery. -//! -//! ## Thread Safety -//! A mutex protects buffer/texture maps. Vulkan commands are NOT thread-safe -//! - all rendering must occur on the main thread. Queue submissions are synchronized -//! via an internal mutex in `VulkanDevice`. -//! const std = @import("std"); const c = @import("../../c.zig").c; const rhi = @import("rhi.zig"); -const VulkanDevice = @import("vulkan_device.zig").VulkanDevice; -const VulkanSwapchain = @import("vulkan_swapchain.zig").VulkanSwapchain; const RenderDevice = @import("render_device.zig").RenderDevice; const Mat4 = @import("../math/mat4.zig").Mat4; const Vec3 = @import("../math/vec3.zig").Vec3; -const build_options = @import("build_options"); - -const resource_manager_pkg = @import("vulkan/resource_manager.zig"); -const ResourceManager = resource_manager_pkg.ResourceManager; -const FrameManager = @import("vulkan/frame_manager.zig").FrameManager; -const SwapchainPresenter = @import("vulkan/swapchain_presenter.zig").SwapchainPresenter; -const DescriptorManager = @import("vulkan/descriptor_manager.zig").DescriptorManager; -const Utils = @import("vulkan/utils.zig"); -const shader_registry = @import("vulkan/shader_registry.zig"); -const bloom_system_pkg = @import("vulkan/bloom_system.zig"); -const BloomSystem = bloom_system_pkg.BloomSystem; -const BloomPushConstants = bloom_system_pkg.BloomPushConstants; -const fxaa_system_pkg = @import("vulkan/fxaa_system.zig"); -const FXAASystem = fxaa_system_pkg.FXAASystem; -const FXAAPushConstants = fxaa_system_pkg.FXAAPushConstants; -const ssao_system_pkg = @import("vulkan/ssao_system.zig"); -const SSAOSystem = ssao_system_pkg.SSAOSystem; -const 
SSAOParams = ssao_system_pkg.SSAOParams; - -/// GPU Render Passes for profiling -const GpuPass = enum { - shadow_0, - shadow_1, - shadow_2, - g_pass, - ssao, - sky, - opaque_pass, - cloud, - bloom, - fxaa, - post_process, - - pub const COUNT = 11; -}; - -/// Push constants for post-process pass (tonemapping + bloom integration) -const PostProcessPushConstants = extern struct { - bloom_enabled: f32, // 0.0 = disabled, 1.0 = enabled - bloom_intensity: f32, // Final bloom blend intensity -}; - -const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT; -const BLOOM_MIP_COUNT = rhi.BLOOM_MIP_COUNT; -const DEPTH_FORMAT = c.VK_FORMAT_D32_SFLOAT; - -/// Global uniform buffer layout (std140). Bound to descriptor set 0, binding 0. -const GlobalUniforms = extern struct { - view_proj: Mat4, // Combined view-projection matrix - view_proj_prev: Mat4, // Previous frame's view-projection for velocity buffer - cam_pos: [4]f32, // Camera world position (w unused) - sun_dir: [4]f32, // Sun direction (w unused) - sun_color: [4]f32, // Sun color (w unused) - fog_color: [4]f32, // Fog RGB (a unused) - cloud_wind_offset: [4]f32, // xy = offset, z = scale, w = coverage - params: [4]f32, // x = time, y = fog_density, z = fog_enabled, w = sun_intensity - lighting: [4]f32, // x = ambient, y = use_texture, z = pbr_enabled, w = cloud_shadow_strength - cloud_params: [4]f32, // x = cloud_height, y = pcf_samples, z = cascade_blend, w = cloud_shadows - pbr_params: [4]f32, // x = pbr_quality, y = exposure, z = saturation, w = ssao_strength - volumetric_params: [4]f32, // x = enabled, y = density, z = steps, w = scattering - viewport_size: [4]f32, // xy = width/height, zw = unused -}; - -const QUERY_COUNT_PER_FRAME = GpuPass.COUNT * 2; -const TOTAL_QUERY_COUNT = QUERY_COUNT_PER_FRAME * MAX_FRAMES_IN_FLIGHT; - -/// Shadow cascade uniforms for CSM. Bound to descriptor set 0, binding 2. 
-const ShadowUniforms = extern struct { - light_space_matrices: [rhi.SHADOW_CASCADE_COUNT]Mat4, - cascade_splits: [4]f32, // vec4 in shader - shadow_texel_sizes: [4]f32, // vec4 in shader -}; - -/// Per-draw model matrix, passed via push constants for efficiency. -const ModelUniforms = extern struct { - model: Mat4, - color: [3]f32, - mask_radius: f32, -}; - -/// Per-draw shadow matrix and model, passed via push constants. -const ShadowModelUniforms = extern struct { - mvp: Mat4, - bias_params: [4]f32, // x=normalBias, y=slopeBias, z=cascadeIndex, w=texelSize -}; - -/// Push constants for procedural sky rendering. -const SkyPushConstants = extern struct { - cam_forward: [4]f32, - cam_right: [4]f32, - cam_up: [4]f32, - sun_dir: [4]f32, - sky_color: [4]f32, - horizon_color: [4]f32, - params: [4]f32, - time: [4]f32, -}; - -const VulkanBuffer = resource_manager_pkg.VulkanBuffer; -const TextureResource = resource_manager_pkg.TextureResource; - -const ShadowSystem = @import("shadow_system.zig").ShadowSystem; - -const DebugShadowResources = if (build_options.debug_shadows) struct { - pipeline: ?c.VkPipeline = null, - pipeline_layout: ?c.VkPipelineLayout = null, - descriptor_set_layout: ?c.VkDescriptorSetLayout = null, - descriptor_sets: [MAX_FRAMES_IN_FLIGHT]?c.VkDescriptorSet = .{null} ** MAX_FRAMES_IN_FLIGHT, - descriptor_pool: [MAX_FRAMES_IN_FLIGHT][8]?c.VkDescriptorSet = .{.{null} ** 8} ** MAX_FRAMES_IN_FLIGHT, - descriptor_next: [MAX_FRAMES_IN_FLIGHT]u32 = .{0} ** MAX_FRAMES_IN_FLIGHT, - vbo: VulkanBuffer = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }, -} else struct {}; - -/// Core Vulkan context containing all renderer state. -/// Owns Vulkan objects and manages their lifecycle. 
const VulkanContext = struct {
    allocator: std.mem.Allocator,
    window: *c.SDL_Window,
    render_device: ?*RenderDevice,

    // Subsystems
    vulkan_device: VulkanDevice,
    resources: ResourceManager,
    frames: FrameManager,
    swapchain: SwapchainPresenter,
    descriptors: DescriptorManager,

    // Legacy / Feature State

    // Dummy shadow texture for fallback
    dummy_shadow_image: c.VkImage = null,
    dummy_shadow_memory: c.VkDeviceMemory = null,
    dummy_shadow_view: c.VkImageView = null,

    // Uniforms (Model UBOs are per-draw/push constant, but we have a fallback/dummy?)
    // descriptor_manager handles Global and Shadow UBOs.
    // We still need dummy_instance_buffer?
    model_ubo: VulkanBuffer = .{}, // Is this used?
    dummy_instance_buffer: VulkanBuffer = .{},

    transfer_fence: c.VkFence = null, // Keep for legacy sync if needed

    // Pipeline
    pipeline_layout: c.VkPipelineLayout = null,
    pipeline: c.VkPipeline = null,

    sky_pipeline: c.VkPipeline = null,
    sky_pipeline_layout: c.VkPipelineLayout = null,

    // Binding State
    // `current_*` = what the caller requested; `bound_*` = what the descriptor
    // sets actually reference. A mismatch marks `descriptors_dirty`.
    current_texture: rhi.TextureHandle,
    current_normal_texture: rhi.TextureHandle,
    current_roughness_texture: rhi.TextureHandle,
    current_displacement_texture: rhi.TextureHandle,
    current_env_texture: rhi.TextureHandle,
    dummy_texture: rhi.TextureHandle,
    dummy_normal_texture: rhi.TextureHandle,
    dummy_roughness_texture: rhi.TextureHandle,
    bound_texture: rhi.TextureHandle,
    bound_normal_texture: rhi.TextureHandle,
    bound_roughness_texture: rhi.TextureHandle,
    bound_displacement_texture: rhi.TextureHandle,
    bound_env_texture: rhi.TextureHandle,
    bound_ssao_handle: rhi.TextureHandle = 0,
    bound_shadow_views: [rhi.SHADOW_CASCADE_COUNT]c.VkImageView,
    descriptors_dirty: [MAX_FRAMES_IN_FLIGHT]bool,

    // Rendering options
    wireframe_enabled: bool = false,
    textures_enabled: bool = true,
    wireframe_pipeline: c.VkPipeline = null,
    vsync_enabled: bool = true,
    present_mode: c.VkPresentModeKHR = c.VK_PRESENT_MODE_FIFO_KHR,
    anisotropic_filtering: u8 = 1,
    msaa_samples: u8 = 1,
    safe_mode: bool = false,
    debug_shadows_active: bool = false, // Toggle shadow debug visualization with 'O' key

    // G-Pass resources
    g_normal_image: c.VkImage = null,
    g_normal_memory: c.VkDeviceMemory = null,
    g_normal_view: c.VkImageView = null,
    g_normal_handle: rhi.TextureHandle = 0,
    g_depth_image: c.VkImage = null, // G-Pass depth (1x sampled for SSAO)
    g_depth_memory: c.VkDeviceMemory = null,
    g_depth_view: c.VkImageView = null,

    // G-Pass & Passes
    g_render_pass: c.VkRenderPass = null,
    main_framebuffer: c.VkFramebuffer = null,
    g_framebuffer: c.VkFramebuffer = null,
    // Track the extent G-pass resources were created with (for mismatch detection)
    g_pass_extent: c.VkExtent2D = .{ .width = 0, .height = 0 },

    // G-Pass Pipelines
    g_pipeline: c.VkPipeline = null,
    g_pipeline_layout: c.VkPipelineLayout = null,
    gpu_fault_detected: bool = false,

    shadow_system: ShadowSystem,
    ssao_system: SSAOSystem = .{},
    shadow_map_handles: [rhi.SHADOW_CASCADE_COUNT]rhi.TextureHandle = .{0} ** rhi.SHADOW_CASCADE_COUNT,
    shadow_texel_sizes: [rhi.SHADOW_CASCADE_COUNT]f32 = .{0.0} ** rhi.SHADOW_CASCADE_COUNT,
    shadow_resolution: u32,
    memory_type_index: u32,
    framebuffer_resized: bool,
    draw_call_count: u32,
    main_pass_active: bool = false,
    g_pass_active: bool = false,
    ssao_pass_active: bool = false,
    post_process_ran_this_frame: bool = false,
    fxaa_ran_this_frame: bool = false,
    pipeline_rebuild_needed: bool = false,

    // Frame state
    frame_index: usize,
    image_index: u32,

    terrain_pipeline_bound: bool = false,
    descriptors_updated: bool = false,
    lod_mode: bool = false,
    bound_instance_buffer: [MAX_FRAMES_IN_FLIGHT]rhi.BufferHandle = .{ 0, 0 },
    bound_lod_instance_buffer: [MAX_FRAMES_IN_FLIGHT]rhi.BufferHandle = .{ 0, 0 },
    pending_instance_buffer: rhi.BufferHandle = 0,
    pending_lod_instance_buffer: rhi.BufferHandle = 0,

    current_view_proj: Mat4 = Mat4.identity,
    current_model: Mat4 = Mat4.identity,
    current_color: [3]f32 = .{ 1.0, 1.0, 1.0 },
    current_mask_radius: f32 = 0.0,
    mutex: std.Thread.Mutex = .{},
    clear_color: [4]f32 = .{ 0.07, 0.08, 0.1, 1.0 },

    // UI Pipeline
    ui_pipeline: c.VkPipeline = null,
    ui_pipeline_layout: c.VkPipelineLayout = null,
    ui_tex_pipeline: c.VkPipeline = null,
    ui_tex_pipeline_layout: c.VkPipelineLayout = null,
    ui_swapchain_pipeline: c.VkPipeline = null,
    ui_swapchain_tex_pipeline: c.VkPipeline = null,
    ui_swapchain_render_pass: c.VkRenderPass = null,
    ui_swapchain_framebuffers: std.ArrayListUnmanaged(c.VkFramebuffer) = .empty,
    ui_tex_descriptor_set_layout: c.VkDescriptorSetLayout = null,
    ui_tex_descriptor_sets: [MAX_FRAMES_IN_FLIGHT]c.VkDescriptorSet = .{null} ** MAX_FRAMES_IN_FLIGHT,
    // Per-frame ring of 64 descriptor sets for textured UI draws;
    // `ui_tex_descriptor_next` indexes the next free slot.
    ui_tex_descriptor_pool: [MAX_FRAMES_IN_FLIGHT][64]c.VkDescriptorSet = .{.{null} ** 64} ** MAX_FRAMES_IN_FLIGHT,
    ui_tex_descriptor_next: [MAX_FRAMES_IN_FLIGHT]u32 = .{0} ** MAX_FRAMES_IN_FLIGHT,
    ui_vbos: [MAX_FRAMES_IN_FLIGHT]VulkanBuffer = .{VulkanBuffer{}} ** MAX_FRAMES_IN_FLIGHT,
    ui_screen_width: f32 = 0.0,
    ui_screen_height: f32 = 0.0,
    ui_using_swapchain: bool = false,
    ui_in_progress: bool = false,
    ui_vertex_offset: u64 = 0,
    selection_mode: bool = false,
    selection_pipeline: c.VkPipeline = null,
    selection_pipeline_layout: c.VkPipelineLayout = null,
    line_pipeline: c.VkPipeline = null,
    ui_flushed_vertex_count: u32 = 0,
    ui_mapped_ptr: ?*anyopaque = null,

    // Cloud Pipeline
    cloud_pipeline: c.VkPipeline = null,
    cloud_pipeline_layout: c.VkPipelineLayout = null,
    cloud_vbo: VulkanBuffer = .{},
    cloud_ebo: VulkanBuffer = .{},
    cloud_mesh_size: f32 = 0.0,
    cloud_vao: c.VkBuffer = null,

    // Post-Process Resources
    hdr_image: c.VkImage = null,
    hdr_memory: c.VkDeviceMemory = null,
    hdr_view: c.VkImageView = null,
    hdr_handle: rhi.TextureHandle = 0,
    hdr_msaa_image: c.VkImage = null,
    hdr_msaa_memory: c.VkDeviceMemory = null,
    hdr_msaa_view: c.VkImageView = null,

    post_process_render_pass: c.VkRenderPass = null,
    post_process_pipeline: c.VkPipeline = null,
    post_process_pipeline_layout: c.VkPipelineLayout = null,
    post_process_descriptor_set_layout: c.VkDescriptorSetLayout = null,
    post_process_descriptor_sets: [MAX_FRAMES_IN_FLIGHT]c.VkDescriptorSet = .{null} ** MAX_FRAMES_IN_FLIGHT,
    post_process_sampler: c.VkSampler = null,
    post_process_pass_active: bool = false,
    post_process_framebuffers: std.ArrayListUnmanaged(c.VkFramebuffer) = .empty,
    hdr_render_pass: c.VkRenderPass = null,

    debug_shadow: DebugShadowResources = .{},

    // Phase 3 Systems
    fxaa: FXAASystem = .{},
    bloom: BloomSystem = .{},

    // Phase 3: Velocity Buffer (prep for TAA/Motion Blur)
    velocity_image: c.VkImage = null,
    velocity_memory: c.VkDeviceMemory = null,
    velocity_view: c.VkImageView = null,
    velocity_handle: rhi.TextureHandle = 0,
    view_proj_prev: Mat4 = Mat4.identity,

    // GPU Timing
    query_pool: c.VkQueryPool = null,
    timing_enabled: bool = true, // Default to true for debugging
    timing_results: rhi.GpuTimingResults = undefined,
};

/// Destroys the HDR color target (and the optional MSAA target) and nulls
/// the handles so a subsequent create/destroy is idempotent.
/// Caller must ensure the GPU is no longer using these images.
fn destroyHDRResources(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    // View before image before memory: views reference images, images reference memory.
    if (ctx.hdr_view != null) {
        c.vkDestroyImageView(vk, ctx.hdr_view, null);
        ctx.hdr_view = null;
    }
    if (ctx.hdr_image != null) {
        c.vkDestroyImage(vk, ctx.hdr_image, null);
        ctx.hdr_image = null;
    }
    if (ctx.hdr_memory != null) {
        c.vkFreeMemory(vk, ctx.hdr_memory, null);
        ctx.hdr_memory = null;
    }
    if (ctx.hdr_msaa_view != null) {
        c.vkDestroyImageView(vk, ctx.hdr_msaa_view, null);
        ctx.hdr_msaa_view = null;
    }
    if (ctx.hdr_msaa_image != null) {
        c.vkDestroyImage(vk, ctx.hdr_msaa_image, null);
        ctx.hdr_msaa_image = null;
    }
    if (ctx.hdr_msaa_memory != null) {
        c.vkFreeMemory(vk, ctx.hdr_msaa_memory, null);
        ctx.hdr_msaa_memory = null;
    }
}

/// Destroys post-process framebuffers, sampler, pipeline, pipeline layout,
/// and render pass, then tears down the swapchain UI resources as well.
/// The descriptor set layout is deliberately left alive (see note below).
fn destroyPostProcessResources(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    // Destroy post-process framebuffers
    for (ctx.post_process_framebuffers.items) |fb| {
        c.vkDestroyFramebuffer(vk, fb, null);
    }
    ctx.post_process_framebuffers.deinit(ctx.allocator);
    ctx.post_process_framebuffers = .empty;

    if (ctx.post_process_sampler != null) {
        c.vkDestroySampler(vk, ctx.post_process_sampler, null);
        ctx.post_process_sampler = null;
    }
    if (ctx.post_process_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.post_process_pipeline, null);
        ctx.post_process_pipeline = null;
    }
    if (ctx.post_process_pipeline_layout != null) {
        c.vkDestroyPipelineLayout(vk, ctx.post_process_pipeline_layout, null);
        ctx.post_process_pipeline_layout = null;
    }
    // Note: post_process_descriptor_set_layout is created once in initContext and NOT destroyed here
    if (ctx.post_process_render_pass != null) {
        c.vkDestroyRenderPass(vk, ctx.post_process_render_pass, null);
        ctx.post_process_render_pass = null;
    }

    destroySwapchainUIResources(ctx);
}

/// Destroys all G-pass resources: velocity buffer, SSAO system, G-pass
/// pipeline/layout/framebuffer/render pass, and the normal + depth attachments.
fn destroyGPassResources(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    destroyVelocityResources(ctx);
    ctx.ssao_system.deinit(vk, ctx.allocator);
    if (ctx.g_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.g_pipeline, null);
        ctx.g_pipeline = null;
    }
    if (ctx.g_pipeline_layout != null) {
        c.vkDestroyPipelineLayout(vk, ctx.g_pipeline_layout, null);
        ctx.g_pipeline_layout = null;
    }
    if (ctx.g_framebuffer != null) {
        c.vkDestroyFramebuffer(vk, ctx.g_framebuffer, null);
        ctx.g_framebuffer = null;
    }
    if (ctx.g_render_pass != null) {
        c.vkDestroyRenderPass(vk, ctx.g_render_pass, null);
        ctx.g_render_pass = null;
    }
    if (ctx.g_normal_view != null) {
        c.vkDestroyImageView(vk, ctx.g_normal_view, null);
        ctx.g_normal_view = null;
    }
    if (ctx.g_normal_image != null) {
        c.vkDestroyImage(vk, ctx.g_normal_image, null);
        ctx.g_normal_image = null;
    }
    if (ctx.g_normal_memory != null) {
        c.vkFreeMemory(vk, ctx.g_normal_memory, null);
        ctx.g_normal_memory = null;
    }
    if (ctx.g_depth_view != null) {
        c.vkDestroyImageView(vk, ctx.g_depth_view, null);
        ctx.g_depth_view = null;
    }
    if (ctx.g_depth_image != null) {
        c.vkDestroyImage(vk, ctx.g_depth_image, null);
        ctx.g_depth_image = null;
    }
    if (ctx.g_depth_memory != null) {
        c.vkFreeMemory(vk, ctx.g_depth_memory, null);
        ctx.g_depth_memory = null;
    }
}

/// Destroys only the two UI pipelines that render directly into the swapchain.
/// Safe to call when the device handle is already null.
fn destroySwapchainUIPipelines(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    if (ctx.ui_swapchain_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.ui_swapchain_pipeline, null);
        ctx.ui_swapchain_pipeline = null;
    }
    if (ctx.ui_swapchain_tex_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.ui_swapchain_tex_pipeline, null);
        ctx.ui_swapchain_tex_pipeline = null;
    }
}

/// Destroys the swapchain UI framebuffers and render pass (but not the
/// pipelines — see destroySwapchainUIPipelines). Safe on a null device.
fn destroySwapchainUIResources(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    for (ctx.ui_swapchain_framebuffers.items) |fb| {
        c.vkDestroyFramebuffer(vk, fb, null);
    }
    ctx.ui_swapchain_framebuffers.deinit(ctx.allocator);
    ctx.ui_swapchain_framebuffers = .empty;

    if (ctx.ui_swapchain_render_pass != null) {
        c.vkDestroyRenderPass(vk, ctx.ui_swapchain_render_pass, null);
        ctx.ui_swapchain_render_pass = null;
    }
}

/// Tears down the FXAA system together with the swapchain UI pipelines
/// (which FXAA's output path shares).
fn destroyFXAAResources(ctx: *VulkanContext) void {
    destroySwapchainUIPipelines(ctx);
    ctx.fxaa.deinit(ctx.vulkan_device.vk_device, ctx.allocator, ctx.descriptors.descriptor_pool);
}

/// Tears down the bloom system's resources.
fn destroyBloomResources(ctx: *VulkanContext) void {
    ctx.bloom.deinit(ctx.vulkan_device.vk_device, ctx.allocator, ctx.descriptors.descriptor_pool);
}

/// Destroys the velocity buffer (image, view, memory). Safe on a null device.
fn destroyVelocityResources(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    if (ctx.velocity_view != null) {
        c.vkDestroyImageView(vk, ctx.velocity_view, null);
        ctx.velocity_view = null;
    }
    if (ctx.velocity_image != null) {
        c.vkDestroyImage(vk, ctx.velocity_image, null);
        ctx.velocity_image = null;
    }
    if (ctx.velocity_memory != null) {
        c.vkFreeMemory(vk, ctx.velocity_memory, null);
        ctx.velocity_memory = null;
    }
}

/// Transitions an array of images to SHADER_READ_ONLY_OPTIMAL layout.
/// Records a one-time command buffer with UNDEFINED -> SHADER_READ_ONLY_OPTIMAL
/// barriers for all images, submits it, and blocks until the queue is idle
/// (this is a synchronous, init-time helper — not for per-frame use).
/// NOTE(review): `barriers` is a fixed array of 16; callers must pass at most
/// 16 images or the loop writes out of bounds — TODO confirm call sites.
fn transitionImagesToShaderRead(ctx: *VulkanContext, images: []const c.VkImage, is_depth: bool) !void {
    const aspect_mask: c.VkImageAspectFlags = if (is_depth) c.VK_IMAGE_ASPECT_DEPTH_BIT else c.VK_IMAGE_ASPECT_COLOR_BIT;
    var alloc_info = std.mem.zeroes(c.VkCommandBufferAllocateInfo);
    alloc_info.sType = c.VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    alloc_info.level = c.VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    alloc_info.commandPool = ctx.frames.command_pool;
    alloc_info.commandBufferCount = 1;

    var cmd: c.VkCommandBuffer = null;
    try Utils.checkVk(c.vkAllocateCommandBuffers(ctx.vulkan_device.vk_device, &alloc_info, &cmd));
    var begin_info = std.mem.zeroes(c.VkCommandBufferBeginInfo);
    begin_info.sType = c.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = c.VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    try Utils.checkVk(c.vkBeginCommandBuffer(cmd, &begin_info));

    const count = images.len;
    var barriers: [16]c.VkImageMemoryBarrier = undefined;
    for (0..count) |i| {
        barriers[i] = std.mem.zeroes(c.VkImageMemoryBarrier);
        barriers[i].sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barriers[i].oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        barriers[i].newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        barriers[i].srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED;
        barriers[i].dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED;
        barriers[i].image = images[i];
        barriers[i].subresourceRange = .{ .aspectMask = aspect_mask, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };
        barriers[i].srcAccessMask = 0;
        barriers[i].dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT;
    }

    c.vkCmdPipelineBarrier(cmd, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, @intCast(count), &barriers[0]);

    try Utils.checkVk(c.vkEndCommandBuffer(cmd));

    var submit_info = std.mem.zeroes(c.VkSubmitInfo);
    submit_info.sType = c.VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cmd;
    try ctx.vulkan_device.submitGuarded(submit_info, null);
    try Utils.checkVk(c.vkQueueWaitIdle(ctx.vulkan_device.queue));
    c.vkFreeCommandBuffers(ctx.vulkan_device.vk_device, ctx.frames.command_pool, 1, &cmd);
}

/// Converts MSAA sample count (1, 2, 4, 8) to Vulkan sample count flag.
/// Any unrecognized value falls back to single sampling.
fn getMSAASampleCountFlag(samples: u8) c.VkSampleCountFlagBits {
    return switch (samples) {
        2 => c.VK_SAMPLE_COUNT_2_BIT,
        4 => c.VK_SAMPLE_COUNT_4_BIT,
        8 => c.VK_SAMPLE_COUNT_8_BIT,
        else => c.VK_SAMPLE_COUNT_1_BIT,
    };
}

/// Creates the RGBA16F HDR color target at swapchain extent, plus a transient
/// multisampled variant when MSAA is enabled. Writes results into
/// ctx.hdr_image/memory/view (and the hdr_msaa_* trio when msaa_samples > 1).
fn createHDRResources(ctx: *VulkanContext) !void {
    const extent = ctx.swapchain.getExtent();
    const format = c.VK_FORMAT_R16G16B16A16_SFLOAT;
    const sample_count = getMSAASampleCountFlag(ctx.msaa_samples);

    // 1. Create HDR image
    var image_info = std.mem.zeroes(c.VkImageCreateInfo);
    image_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_info.imageType = c.VK_IMAGE_TYPE_2D;
    image_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
    image_info.mipLevels = 1;
    image_info.arrayLayers = 1;
    image_info.format = format;
    image_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
    image_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
    image_info.usage = c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
    image_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
    image_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;

    try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &image_info, null, &ctx.hdr_image));

    var mem_reqs: c.VkMemoryRequirements = undefined;
    c.vkGetImageMemoryRequirements(ctx.vulkan_device.vk_device, ctx.hdr_image, &mem_reqs);
    var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
    alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = mem_reqs.size;
    alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    try Utils.checkVk(c.vkAllocateMemory(ctx.vulkan_device.vk_device, &alloc_info, null, &ctx.hdr_memory));
    try Utils.checkVk(c.vkBindImageMemory(ctx.vulkan_device.vk_device, ctx.hdr_image, ctx.hdr_memory, 0));

    var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
    view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_info.image = ctx.hdr_image;
    view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
    view_info.format = format;
    view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };
    try Utils.checkVk(c.vkCreateImageView(ctx.vulkan_device.vk_device, &view_info, null, &ctx.hdr_view));

    // 2. Create MSAA HDR image if needed
    // Reuses image_info/view_info, overriding only sample count and usage
    // (TRANSIENT: the MSAA target is resolved, never sampled).
    if (ctx.msaa_samples > 1) {
        image_info.samples = sample_count;
        image_info.usage = c.VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &image_info, null, &ctx.hdr_msaa_image));
        c.vkGetImageMemoryRequirements(ctx.vulkan_device.vk_device, ctx.hdr_msaa_image, &mem_reqs);
        alloc_info.allocationSize = mem_reqs.size;
        alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
        try Utils.checkVk(c.vkAllocateMemory(ctx.vulkan_device.vk_device, &alloc_info, null, &ctx.hdr_msaa_memory));
        try Utils.checkVk(c.vkBindImageMemory(ctx.vulkan_device.vk_device, ctx.hdr_msaa_image, ctx.hdr_msaa_memory, 0));

        view_info.image = ctx.hdr_msaa_image;
        try Utils.checkVk(c.vkCreateImageView(ctx.vulkan_device.vk_device, &view_info, null, &ctx.hdr_msaa_view));
    }
}

/// Builds the tonemapping/post-process pass: render pass targeting the
/// swapchain, descriptor layout (HDR scene + uniforms + bloom), pipeline
/// layout with an 8-byte push-constant range, linear sampler, fullscreen
/// pipeline, per-frame descriptor sets, and one framebuffer per swapchain image.
/// Layout objects are created lazily (only when still null) so this can be
/// re-run on swapchain recreation without leaking them.
fn createPostProcessResources(ctx: *VulkanContext) !void {
    const vk = ctx.vulkan_device.vk_device;

    // 1. Render Pass
    var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
    color_attachment.format = ctx.swapchain.getImageFormat();
    color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
    color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
    color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
    color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
    color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
    color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

    var subpass = std.mem.zeroes(c.VkSubpassDescription);
    subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_ref;

    var dependency = std.mem.zeroes(c.VkSubpassDependency);
    dependency.srcSubpass = c.VK_SUBPASS_EXTERNAL;
    dependency.dstSubpass = 0;
    dependency.srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.srcAccessMask = 0;
    dependency.dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
    rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.attachmentCount = 1;
    rp_info.pAttachments = &color_attachment;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;
    rp_info.dependencyCount = 1;
    rp_info.pDependencies = &dependency;

    try Utils.checkVk(c.vkCreateRenderPass(vk, &rp_info, null, &ctx.post_process_render_pass));

    // 2. Descriptor Set Layout (binding 0: HDR scene, binding 1: uniforms, binding 2: bloom)
    if (ctx.post_process_descriptor_set_layout == null) {
        var bindings = [_]c.VkDescriptorSetLayoutBinding{
            .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT },
            .{ .binding = 1, .descriptorType = c.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT },
            .{ .binding = 2, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT },
        };
        var layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo);
        layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        layout_info.bindingCount = 3;
        layout_info.pBindings = &bindings[0];
        try Utils.checkVk(c.vkCreateDescriptorSetLayout(vk, &layout_info, null, &ctx.post_process_descriptor_set_layout));
    }

    // 3. Pipeline Layout (with push constants for bloom parameters)
    if (ctx.post_process_pipeline_layout == null) {
        var post_push_constant = std.mem.zeroes(c.VkPushConstantRange);
        post_push_constant.stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT;
        post_push_constant.offset = 0;
        post_push_constant.size = 8; // 2 floats: bloomEnabled, bloomIntensity

        var pipe_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo);
        pipe_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
        pipe_layout_info.setLayoutCount = 1;
        pipe_layout_info.pSetLayouts = &ctx.post_process_descriptor_set_layout;
        pipe_layout_info.pushConstantRangeCount = 1;
        pipe_layout_info.pPushConstantRanges = &post_push_constant;
        try Utils.checkVk(c.vkCreatePipelineLayout(vk, &pipe_layout_info, null, &ctx.post_process_pipeline_layout));
    }

    // 4. Create Linear Sampler
    // Replace any previous sampler (re-run on swapchain recreation).
    if (ctx.post_process_sampler != null) {
        c.vkDestroySampler(vk, ctx.post_process_sampler, null);
        ctx.post_process_sampler = null;
    }

    var sampler_info = std.mem.zeroes(c.VkSamplerCreateInfo);
    sampler_info.sType = c.VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    sampler_info.magFilter = c.VK_FILTER_LINEAR;
    sampler_info.minFilter = c.VK_FILTER_LINEAR;
    sampler_info.addressModeU = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_info.addressModeV = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_info.addressModeW = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    sampler_info.mipmapMode = c.VK_SAMPLER_MIPMAP_MODE_LINEAR;
    var linear_sampler: c.VkSampler = null;
    try Utils.checkVk(c.vkCreateSampler(vk, &sampler_info, null, &linear_sampler));
    errdefer c.vkDestroySampler(vk, linear_sampler, null);

    // 5. Pipeline
    const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.POST_PROCESS_VERT, ctx.allocator, @enumFromInt(1024 * 1024));
    defer ctx.allocator.free(vert_code);
    const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.POST_PROCESS_FRAG, ctx.allocator, @enumFromInt(1024 * 1024));
    defer ctx.allocator.free(frag_code);
    const vert_module = try Utils.createShaderModule(vk, vert_code);
    defer c.vkDestroyShaderModule(vk, vert_module, null);
    const frag_module = try Utils.createShaderModule(vk, frag_code);
    defer c.vkDestroyShaderModule(vk, frag_module, null);

    var stages = [_]c.VkPipelineShaderStageCreateInfo{
        .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" },
        .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" },
    };

    // Fullscreen pass: no vertex input, dynamic viewport/scissor, no culling.
    var vi_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo);
    vi_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    var ia_info = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo);
    ia_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    ia_info.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

    var vp_info = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo);
    vp_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    vp_info.viewportCount = 1;
    vp_info.scissorCount = 1;

    var rs_info = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo);
    rs_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rs_info.lineWidth = 1.0;
    rs_info.cullMode = c.VK_CULL_MODE_NONE;
    rs_info.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE;

    var ms_info = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo);
    ms_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    ms_info.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT;

    var cb_attach = std.mem.zeroes(c.VkPipelineColorBlendAttachmentState);
    cb_attach.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT;
    var cb_info = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo);
    cb_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    cb_info.attachmentCount = 1;
    cb_info.pAttachments = &cb_attach;

    var dyn_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR };
    var dyn_info = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo);
    dyn_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    dyn_info.dynamicStateCount = 2;
    dyn_info.pDynamicStates = &dyn_states[0];

    var pipe_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo);
    pipe_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipe_info.stageCount = 2;
    pipe_info.pStages = &stages[0];
    pipe_info.pVertexInputState = &vi_info;
    pipe_info.pInputAssemblyState = &ia_info;
    pipe_info.pViewportState = &vp_info;
    pipe_info.pRasterizationState = &rs_info;
    pipe_info.pMultisampleState = &ms_info;
    pipe_info.pColorBlendState = &cb_info;
    pipe_info.pDynamicState = &dyn_info;
    pipe_info.layout = ctx.post_process_pipeline_layout;
    pipe_info.renderPass = ctx.post_process_render_pass;

    try Utils.checkVk(c.vkCreateGraphicsPipelines(vk, null, 1, &pipe_info, null, &ctx.post_process_pipeline));

    // 6. Descriptor Sets
    // Allocated once per frame-in-flight, rewritten each time to point at the
    // (possibly recreated) HDR view and global UBO.
    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        if (ctx.post_process_descriptor_sets[i] == null) {
            var alloc_ds_info = std.mem.zeroes(c.VkDescriptorSetAllocateInfo);
            alloc_ds_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
            alloc_ds_info.descriptorPool = ctx.descriptors.descriptor_pool;
            alloc_ds_info.descriptorSetCount = 1;
            alloc_ds_info.pSetLayouts = &ctx.post_process_descriptor_set_layout;
            try Utils.checkVk(c.vkAllocateDescriptorSets(vk, &alloc_ds_info, &ctx.post_process_descriptor_sets[i]));
        }

        var image_info_ds = std.mem.zeroes(c.VkDescriptorImageInfo);
        image_info_ds.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        image_info_ds.imageView = ctx.hdr_view;
        image_info_ds.sampler = linear_sampler;

        var buffer_info_ds = std.mem.zeroes(c.VkDescriptorBufferInfo);
        buffer_info_ds.buffer = ctx.descriptors.global_ubos[i].buffer;
        buffer_info_ds.offset = 0;
        buffer_info_ds.range = @sizeOf(GlobalUniforms);

        var writes = [_]c.VkWriteDescriptorSet{
            .{
                .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = ctx.post_process_descriptor_sets[i],
                .dstBinding = 0,
                .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .descriptorCount = 1,
                .pImageInfo = &image_info_ds,
            },
            .{
                .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = ctx.post_process_descriptor_sets[i],
                .dstBinding = 1,
                .descriptorType = c.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                .descriptorCount = 1,
                .pBufferInfo = &buffer_info_ds,
            },
            .{
                .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = ctx.post_process_descriptor_sets[i],
                .dstBinding = 2,
                .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .descriptorCount = 1,
                .pImageInfo = &image_info_ds, // Dummy: use HDR view as placeholder for bloom
            },
        };
        c.vkUpdateDescriptorSets(vk, 3, &writes[0], 0, null);
    }

    // 7. Create post-process framebuffers (one per swapchain image)
    for (ctx.post_process_framebuffers.items) |fb| {
        c.vkDestroyFramebuffer(vk, fb, null);
    }
    ctx.post_process_framebuffers.clearRetainingCapacity();

    for (ctx.swapchain.getImageViews()) |iv| {
        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = ctx.post_process_render_pass;
        fb_info.attachmentCount = 1;
        fb_info.pAttachments = &iv;
        fb_info.width = ctx.swapchain.getExtent().width;
        fb_info.height = ctx.swapchain.getExtent().height;
        fb_info.layers = 1;

        var fb: c.VkFramebuffer = null;
        try Utils.checkVk(c.vkCreateFramebuffer(vk, &fb_info, null, &fb));
        try ctx.post_process_framebuffers.append(ctx.allocator, fb);
    }

    // Clean up local sampler if not stored in context (but we should probably store it to destroy it later)
    ctx.post_process_sampler = linear_sampler;
}

/// (Re)creates the UI-over-swapchain render pass and one framebuffer per
/// swapchain image. The pass LOADs the existing swapchain contents
/// (PRESENT_SRC -> PRESENT_SRC) so UI composites on top of the scene.
/// Destroys any previous resources first, and again on error.
fn createSwapchainUIResources(ctx: *VulkanContext) !void {
    const vk = ctx.vulkan_device.vk_device;

    destroySwapchainUIResources(ctx);
    errdefer destroySwapchainUIResources(ctx);

    var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
    color_attachment.format = ctx.swapchain.getImageFormat();
    color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
    color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_LOAD;
    color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
    color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
    color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

    var subpass = std.mem.zeroes(c.VkSubpassDescription);
    subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_ref;

    var dependency = std.mem.zeroes(c.VkSubpassDependency);
    dependency.srcSubpass = c.VK_SUBPASS_EXTERNAL;
    dependency.dstSubpass = 0;
    dependency.srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.srcAccessMask = 0;
    dependency.dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    dependency.dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

    var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
    rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.attachmentCount = 1;
    rp_info.pAttachments = &color_attachment;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;
    rp_info.dependencyCount = 1;
    rp_info.pDependencies = &dependency;

    try Utils.checkVk(c.vkCreateRenderPass(vk, &rp_info, null, &ctx.ui_swapchain_render_pass));

    for (ctx.swapchain.getImageViews()) |iv| {
        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = ctx.ui_swapchain_render_pass;
        fb_info.attachmentCount = 1;
        fb_info.pAttachments = &iv;
        fb_info.width = ctx.swapchain.getExtent().width;
        fb_info.height = ctx.swapchain.getExtent().height;
        fb_info.layers = 1;

        var fb: c.VkFramebuffer = null;
        try Utils.checkVk(c.vkCreateFramebuffer(vk, &fb_info, null, &fb));
        try ctx.ui_swapchain_framebuffers.append(ctx.allocator, fb);
    }
}

/// Creates the cascaded shadow-map resources (render pass, depth array image,
/// per-cascade views/framebuffers, samplers). Continues below.
fn createShadowResources(ctx: *VulkanContext) !void {
    const vk = ctx.vulkan_device.vk_device;
    // 10.
Shadow Pass (Created ONCE) - const shadow_res = ctx.shadow_resolution; - var shadow_depth_desc = std.mem.zeroes(c.VkAttachmentDescription); - shadow_depth_desc.format = DEPTH_FORMAT; - shadow_depth_desc.samples = c.VK_SAMPLE_COUNT_1_BIT; - shadow_depth_desc.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR; - shadow_depth_desc.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE; - shadow_depth_desc.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; - shadow_depth_desc.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - var shadow_depth_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL }; - var shadow_subpass = std.mem.zeroes(c.VkSubpassDescription); - shadow_subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS; - shadow_subpass.pDepthStencilAttachment = &shadow_depth_ref; - var shadow_rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo); - shadow_rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; - shadow_rp_info.attachmentCount = 1; - shadow_rp_info.pAttachments = &shadow_depth_desc; - shadow_rp_info.subpassCount = 1; - shadow_rp_info.pSubpasses = &shadow_subpass; - - // Add subpass dependencies for proper synchronization - var shadow_dependencies = [_]c.VkSubpassDependency{ - // 1. External -> Subpass 0: Wait for previous reads to finish before writing - .{ - .srcSubpass = c.VK_SUBPASS_EXTERNAL, - .dstSubpass = 0, - .srcStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, - .dstStageMask = c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, - .srcAccessMask = c.VK_ACCESS_SHADER_READ_BIT, - .dstAccessMask = c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, - .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT, - }, - // 2. 
Subpass 0 -> External: Wait for writes to finish before subsequent reads (sampling) - .{ - .srcSubpass = 0, - .dstSubpass = c.VK_SUBPASS_EXTERNAL, - .srcStageMask = c.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, - .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, - .srcAccessMask = c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, - .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT, - .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT, - }, - }; - shadow_rp_info.dependencyCount = 2; - shadow_rp_info.pDependencies = &shadow_dependencies; - - try Utils.checkVk(c.vkCreateRenderPass(ctx.vulkan_device.vk_device, &shadow_rp_info, null, &ctx.shadow_system.shadow_render_pass)); - - ctx.shadow_system.shadow_extent = .{ .width = shadow_res, .height = shadow_res }; - - var shadow_img_info = std.mem.zeroes(c.VkImageCreateInfo); - shadow_img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - shadow_img_info.imageType = c.VK_IMAGE_TYPE_2D; - shadow_img_info.extent = .{ .width = shadow_res, .height = shadow_res, .depth = 1 }; - shadow_img_info.mipLevels = 1; - shadow_img_info.arrayLayers = rhi.SHADOW_CASCADE_COUNT; - shadow_img_info.format = DEPTH_FORMAT; - shadow_img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL; - shadow_img_info.usage = c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT; - shadow_img_info.samples = c.VK_SAMPLE_COUNT_1_BIT; - try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &shadow_img_info, null, &ctx.shadow_system.shadow_image)); - - var mem_reqs: c.VkMemoryRequirements = undefined; - c.vkGetImageMemoryRequirements(vk, ctx.shadow_system.shadow_image, &mem_reqs); - var alloc_info = c.VkMemoryAllocateInfo{ .sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .allocationSize = mem_reqs.size, .memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) }; - try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, 
&ctx.shadow_system.shadow_image_memory)); - try Utils.checkVk(c.vkBindImageMemory(vk, ctx.shadow_system.shadow_image, ctx.shadow_system.shadow_image_memory, 0)); - - // Full array view for sampling - var array_view_info = std.mem.zeroes(c.VkImageViewCreateInfo); - array_view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; - array_view_info.image = ctx.shadow_system.shadow_image; - array_view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D_ARRAY; - array_view_info.format = DEPTH_FORMAT; - array_view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = rhi.SHADOW_CASCADE_COUNT }; - try Utils.checkVk(c.vkCreateImageView(vk, &array_view_info, null, &ctx.shadow_system.shadow_image_view)); - - // Shadow Samplers - { - var sampler_info = std.mem.zeroes(c.VkSamplerCreateInfo); - sampler_info.sType = c.VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; - sampler_info.magFilter = c.VK_FILTER_LINEAR; - sampler_info.minFilter = c.VK_FILTER_LINEAR; - sampler_info.addressModeU = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; - sampler_info.addressModeV = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; - sampler_info.addressModeW = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; - sampler_info.anisotropyEnable = c.VK_FALSE; - sampler_info.maxAnisotropy = 1.0; - sampler_info.borderColor = c.VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK; - sampler_info.compareEnable = c.VK_TRUE; - sampler_info.compareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL; - - try Utils.checkVk(c.vkCreateSampler(vk, &sampler_info, null, &ctx.shadow_system.shadow_sampler)); - - // Regular sampler (no comparison) for debug visualization - var regular_sampler_info = sampler_info; - regular_sampler_info.compareEnable = c.VK_FALSE; - regular_sampler_info.compareOp = c.VK_COMPARE_OP_ALWAYS; - try Utils.checkVk(c.vkCreateSampler(vk, ®ular_sampler_info, null, &ctx.shadow_system.shadow_sampler_regular)); - } - - // Layered views for framebuffers (one per cascade) - for 
(0..rhi.SHADOW_CASCADE_COUNT) |si| { - var layer_view: c.VkImageView = null; - var layer_view_info = std.mem.zeroes(c.VkImageViewCreateInfo); - layer_view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; - layer_view_info.image = ctx.shadow_system.shadow_image; - layer_view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D; - layer_view_info.format = DEPTH_FORMAT; - layer_view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = @intCast(si), .layerCount = 1 }; - try Utils.checkVk(c.vkCreateImageView(vk, &layer_view_info, null, &layer_view)); - ctx.shadow_system.shadow_image_views[si] = layer_view; - - // Register shadow cascade as a texture handle for debug visualization - ctx.shadow_map_handles[si] = try ctx.resources.registerExternalTexture(shadow_res, shadow_res, .depth, layer_view, ctx.shadow_system.shadow_sampler_regular); - - var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo); - fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; - fb_info.renderPass = ctx.shadow_system.shadow_render_pass; - fb_info.attachmentCount = 1; - fb_info.pAttachments = &ctx.shadow_system.shadow_image_views[si]; - fb_info.width = shadow_res; - fb_info.height = shadow_res; - fb_info.layers = 1; - try Utils.checkVk(c.vkCreateFramebuffer(vk, &fb_info, null, &ctx.shadow_system.shadow_framebuffers[si])); - ctx.shadow_system.shadow_image_layouts[si] = c.VK_IMAGE_LAYOUT_UNDEFINED; - } - - const shadow_vert = try std.fs.cwd().readFileAlloc(shader_registry.SHADOW_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(shadow_vert); - const shadow_frag = try std.fs.cwd().readFileAlloc(shader_registry.SHADOW_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(shadow_frag); - - const shadow_vert_module = try Utils.createShaderModule(vk, shadow_vert); - defer c.vkDestroyShaderModule(vk, shadow_vert_module, null); - const shadow_frag_module = try 
Utils.createShaderModule(vk, shadow_frag); - defer c.vkDestroyShaderModule(vk, shadow_frag_module, null); - - var shadow_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = shadow_vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = shadow_frag_module, .pName = "main" }, - }; - - const shadow_binding = c.VkVertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(rhi.Vertex), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - var shadow_attrs: [2]c.VkVertexInputAttributeDescription = undefined; - shadow_attrs[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 0 }; - shadow_attrs[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 24 }; // normal offset - - var shadow_vertex_input = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - shadow_vertex_input.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - shadow_vertex_input.vertexBindingDescriptionCount = 1; - shadow_vertex_input.pVertexBindingDescriptions = &shadow_binding; - shadow_vertex_input.vertexAttributeDescriptionCount = 2; - shadow_vertex_input.pVertexAttributeDescriptions = &shadow_attrs[0]; - - var shadow_input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); - shadow_input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; - shadow_input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; - - var shadow_rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); - shadow_rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; - shadow_rasterizer.lineWidth = 1.0; - shadow_rasterizer.cullMode = c.VK_CULL_MODE_NONE; - shadow_rasterizer.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE; - 
shadow_rasterizer.depthBiasEnable = c.VK_TRUE; - - var shadow_multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); - shadow_multisampling.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; - shadow_multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; - - var shadow_depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo); - shadow_depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; - shadow_depth_stencil.depthTestEnable = c.VK_TRUE; - shadow_depth_stencil.depthWriteEnable = c.VK_TRUE; - shadow_depth_stencil.depthCompareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL; - - var shadow_color_blend = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); - shadow_color_blend.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - shadow_color_blend.attachmentCount = 0; - shadow_color_blend.pAttachments = null; - - const shadow_dynamic_states = [_]c.VkDynamicState{ - c.VK_DYNAMIC_STATE_VIEWPORT, - c.VK_DYNAMIC_STATE_SCISSOR, - c.VK_DYNAMIC_STATE_DEPTH_BIAS, - }; - var shadow_dynamic_state = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); - shadow_dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; - shadow_dynamic_state.dynamicStateCount = shadow_dynamic_states.len; - shadow_dynamic_state.pDynamicStates = &shadow_dynamic_states; - - var shadow_viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo); - shadow_viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; - shadow_viewport_state.viewportCount = 1; - shadow_viewport_state.scissorCount = 1; - - var shadow_pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - shadow_pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - shadow_pipeline_info.stageCount = shadow_stages.len; - shadow_pipeline_info.pStages = &shadow_stages[0]; - shadow_pipeline_info.pVertexInputState = &shadow_vertex_input; - 
    shadow_pipeline_info.pInputAssemblyState = &shadow_input_assembly;
    shadow_pipeline_info.pViewportState = &shadow_viewport_state;
    shadow_pipeline_info.pRasterizationState = &shadow_rasterizer;
    shadow_pipeline_info.pMultisampleState = &shadow_multisampling;
    shadow_pipeline_info.pDepthStencilState = &shadow_depth_stencil;
    shadow_pipeline_info.pColorBlendState = &shadow_color_blend;
    shadow_pipeline_info.layout = ctx.pipeline_layout;
    shadow_pipeline_info.pDynamicState = &shadow_dynamic_state;
    shadow_pipeline_info.renderPass = ctx.shadow_system.shadow_render_pass;
    shadow_pipeline_info.subpass = 0;

    // Create the replacement pipeline first so the existing one remains valid
    // if creation fails (the `try` below returns before anything is destroyed).
    var new_pipeline: c.VkPipeline = null;
    try Utils.checkVk(c.vkCreateGraphicsPipelines(vk, null, 1, &shadow_pipeline_info, null, &new_pipeline));

    // Only destroy the previous pipeline once the new one exists.
    if (ctx.shadow_system.shadow_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.shadow_system.shadow_pipeline, null);
    }
    ctx.shadow_system.shadow_pipeline = new_pipeline;
}

/// Updates post-process descriptor sets to include bloom texture (called after bloom resources are created).
/// Writes binding 2 (combined image sampler) of every per-frame post-process descriptor set.
/// No-op if the bloom mip chain has not been created yet.
fn updatePostProcessDescriptorsWithBloom(ctx: *VulkanContext) void {
    const vk = ctx.vulkan_device.vk_device;

    // Get bloom mip0 view (the final composited bloom result); bail out if bloom
    // resources do not exist yet.
    const bloom_view = if (ctx.bloom.mip_views[0] != null) ctx.bloom.mip_views[0] else return;
    // Fall back to the generic post-process sampler when bloom has no dedicated sampler.
    const sampler = if (ctx.bloom.sampler != null) ctx.bloom.sampler else ctx.post_process_sampler;

    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        if (ctx.post_process_descriptor_sets[i] == null) continue;

        var bloom_image_info = std.mem.zeroes(c.VkDescriptorImageInfo);
        bloom_image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        bloom_image_info.imageView = bloom_view;
        bloom_image_info.sampler = sampler;

        // Binding 2 of the post-process set is the bloom texture.
        var write = std.mem.zeroes(c.VkWriteDescriptorSet);
        write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        write.dstSet = ctx.post_process_descriptor_sets[i];
        write.dstBinding = 2;
        write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        write.descriptorCount = 1;
        write.pImageInfo = &bloom_image_info;

        c.vkUpdateDescriptorSets(vk, 1, &write, 0, null);
    }
}

/// (Re)creates the HDR scene render pass stored in `ctx.hdr_render_pass`.
/// Picks between an MSAA layout (MSAA color + MSAA depth + 1x resolve) and a
/// single-sampled layout (color + depth) based on `ctx.msaa_samples`.
/// Destroys any previously created pass first, so it is safe to call again
/// when MSAA settings change.
fn createMainRenderPass(ctx: *VulkanContext) !void {
    const sample_count = getMSAASampleCountFlag(ctx.msaa_samples);
    const use_msaa = ctx.msaa_samples > 1;
    const depth_format = DEPTH_FORMAT;
    // 16-bit float RGBA so lighting can exceed 1.0 before tonemapping.
    const hdr_format = c.VK_FORMAT_R16G16B16A16_SFLOAT;

    // Destroy the old pass so this function is re-entrant on settings changes.
    if (ctx.hdr_render_pass != null) {
        c.vkDestroyRenderPass(ctx.vulkan_device.vk_device, ctx.hdr_render_pass, null);
        ctx.hdr_render_pass = null;
    }

    if (use_msaa) {
        // MSAA render pass: 3 attachments (MSAA color, MSAA depth, resolve)
        var msaa_color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        msaa_color_attachment.format = hdr_format;
        msaa_color_attachment.samples = sample_count;
        msaa_color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        // Multisampled contents are resolved into attachment 2, so they need not be stored.
        msaa_color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        msaa_color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        msaa_color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        msaa_color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        msaa_color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

        std.log.info("MSAA Render Pass: Color samples={}, Depth samples={}", .{ msaa_color_attachment.samples, sample_count });

        var depth_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        depth_attachment.format = depth_format;
        depth_attachment.samples = sample_count;
        depth_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        depth_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        depth_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        depth_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        depth_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        depth_attachment.finalLayout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

        // Single-sampled resolve target; finishes in SHADER_READ_ONLY so the
        // post-process chain can sample it directly after the pass.
        var resolve_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        resolve_attachment.format = hdr_format;
        resolve_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
        resolve_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        resolve_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        resolve_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        resolve_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        resolve_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        resolve_attachment.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
        var depth_ref = c.VkAttachmentReference{ .attachment = 1, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };
        var resolve_ref = c.VkAttachmentReference{ .attachment = 2, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 1;
        subpass.pColorAttachments = &color_ref;
        subpass.pDepthStencilAttachment = &depth_ref;
        subpass.pResolveAttachments = &resolve_ref;

        var dependencies = [_]c.VkSubpassDependency{
            // External -> subpass 0: wait for prior work before attachment output.
            .{
                .srcSubpass = c.VK_SUBPASS_EXTERNAL,
                .dstSubpass = 0,
                .srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                .dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                .srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT,
                .dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
            },
            // Subpass 0 -> external: make color writes visible to later fragment-shader reads.
            .{
                .srcSubpass = 0,
                .dstSubpass = c.VK_SUBPASS_EXTERNAL,
                .srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                .srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT,
                .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
            },
        };

        var attachment_descs = [_]c.VkAttachmentDescription{ msaa_color_attachment, depth_attachment, resolve_attachment };
        var render_pass_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        render_pass_info.attachmentCount = 3;
        render_pass_info.pAttachments = &attachment_descs[0];
        render_pass_info.subpassCount = 1;
        render_pass_info.pSubpasses = &subpass;
        render_pass_info.dependencyCount = 2;
        render_pass_info.pDependencies = &dependencies[0];

        try Utils.checkVk(c.vkCreateRenderPass(ctx.vulkan_device.vk_device, &render_pass_info, null, &ctx.hdr_render_pass));
        std.log.info("Created HDR MSAA {}x render pass", .{ctx.msaa_samples});
    } else {
        // Non-MSAA render pass: 2 attachments (color, depth)
        var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        color_attachment.format = hdr_format;
        color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
        color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        var depth_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        depth_attachment.format = depth_format;
        depth_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
        depth_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        depth_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        depth_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        depth_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        depth_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        depth_attachment.finalLayout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

        var color_attachment_ref = std.mem.zeroes(c.VkAttachmentReference);
        color_attachment_ref.attachment = 0;
        color_attachment_ref.layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

        var depth_attachment_ref = std.mem.zeroes(c.VkAttachmentReference);
        depth_attachment_ref.attachment = 1;
        depth_attachment_ref.layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 1;
        subpass.pColorAttachments = &color_attachment_ref;
        subpass.pDepthStencilAttachment = &depth_attachment_ref;

        var dependencies = [_]c.VkSubpassDependency{
            // External -> subpass 0: wait for prior work before attachment output.
            .{
                .srcSubpass = c.VK_SUBPASS_EXTERNAL,
                .dstSubpass = 0,
                .srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                .dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                .srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT,
                .dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
            },
            // Subpass 0 -> external: make color writes visible to later fragment-shader reads.
            .{
                .srcSubpass = 0,
                .dstSubpass = c.VK_SUBPASS_EXTERNAL,
                .srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                .srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT,
                .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
            },
        };

        var attachments = [_]c.VkAttachmentDescription{ color_attachment, depth_attachment };
        var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        rp_info.attachmentCount = 2;
        rp_info.pAttachments = &attachments[0];
        rp_info.subpassCount = 1;
        rp_info.pSubpasses = &subpass;
        rp_info.dependencyCount = 2;
        rp_info.pDependencies = &dependencies[0];

        try Utils.checkVk(c.vkCreateRenderPass(ctx.vulkan_device.vk_device, &rp_info, null, &ctx.hdr_render_pass));
    }
}

/// (Re)creates the G-pass render pass, its render targets (normal, velocity,
/// depth) and framebuffer at the current swapchain extent.
fn createGPassResources(ctx:
*VulkanContext) !void {
    // Tear down any previous G-pass resources (safe on resize/recreate).
    destroyGPassResources(ctx);
    const normal_format = c.VK_FORMAT_R8G8B8A8_UNORM; // Store normals in [0,1] range
    const velocity_format = c.VK_FORMAT_R16G16_SFLOAT; // RG16F for velocity vectors

    // 1. Create G-Pass render pass (outputs: normal + velocity colors + depth)
    {
        var attachments: [3]c.VkAttachmentDescription = undefined;

        // Attachment 0: Normal buffer (color output)
        attachments[0] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[0].format = normal_format;
        attachments[0].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[0].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[0].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[0].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[0].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[0].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[0].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // Attachment 1: Velocity buffer (color output for motion vectors)
        attachments[1] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[1].format = velocity_format;
        attachments[1].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[1].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[1].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[1].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[1].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[1].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[1].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // Attachment 2: Depth buffer (shared with main pass for SSAO depth sampling)
        attachments[2] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[2].format = DEPTH_FORMAT;
        attachments[2].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[2].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[2].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[2].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[2].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[2].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        // Depth also ends in SHADER_READ_ONLY so SSAO can sample it after the pass.
        attachments[2].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        var color_refs = [_]c.VkAttachmentReference{
            c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
            c.VkAttachmentReference{ .attachment = 1, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
        };
        var depth_ref = c.VkAttachmentReference{ .attachment = 2, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 2;
        subpass.pColorAttachments = &color_refs;
        subpass.pDepthStencilAttachment = &depth_ref;

        var dependencies: [2]c.VkSubpassDependency = undefined;
        // Dependency 0: External -> G-Pass
        dependencies[0] = std.mem.zeroes(c.VkSubpassDependency);
        dependencies[0].srcSubpass = c.VK_SUBPASS_EXTERNAL;
        dependencies[0].dstSubpass = 0;
        dependencies[0].srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
        dependencies[0].dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
        dependencies[0].srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT;
        dependencies[0].dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        dependencies[0].dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

        // Dependency 1: G-Pass -> Fragment shader read (for SSAO)
        dependencies[1] = std.mem.zeroes(c.VkSubpassDependency);
        dependencies[1].srcSubpass = 0;
        dependencies[1].dstSubpass = c.VK_SUBPASS_EXTERNAL;
        dependencies[1].srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | c.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
        dependencies[1].dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        dependencies[1].srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        dependencies[1].dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT;
        dependencies[1].dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

        var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        rp_info.attachmentCount = 3;
        rp_info.pAttachments = &attachments;
        rp_info.subpassCount = 1;
        rp_info.pSubpasses = &subpass;
        rp_info.dependencyCount = 2;
        rp_info.pDependencies = &dependencies;

        try Utils.checkVk(c.vkCreateRenderPass(ctx.vulkan_device.vk_device, &rp_info, null, &ctx.g_render_pass));
    }

    const vk = ctx.vulkan_device.vk_device;
    const extent = ctx.swapchain.getExtent();

    // 2. Create normal image for G-Pass output
    {
        var img_info = std.mem.zeroes(c.VkImageCreateInfo);
        img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        img_info.imageType = c.VK_IMAGE_TYPE_2D;
        img_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
        img_info.mipLevels = 1;
        img_info.arrayLayers = 1;
        img_info.format = normal_format;
        img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
        img_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        img_info.usage = c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
        img_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
        img_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;

        try Utils.checkVk(c.vkCreateImage(vk, &img_info, null, &ctx.g_normal_image));

        var mem_reqs: c.VkMemoryRequirements = undefined;
        c.vkGetImageMemoryRequirements(vk, ctx.g_normal_image, &mem_reqs);

        var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
        alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = mem_reqs.size;
        alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

        try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, &ctx.g_normal_memory));
        try Utils.checkVk(c.vkBindImageMemory(vk, ctx.g_normal_image, ctx.g_normal_memory, 0));

        var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
        view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        view_info.image = ctx.g_normal_image;
        view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
        view_info.format = normal_format;
        view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };

        try Utils.checkVk(c.vkCreateImageView(vk, &view_info, null, &ctx.g_normal_view));
    }

    // 3. Create velocity image for motion vectors (Phase 3)
    {
        var img_info = std.mem.zeroes(c.VkImageCreateInfo);
        img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        img_info.imageType = c.VK_IMAGE_TYPE_2D;
        img_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
        img_info.mipLevels = 1;
        img_info.arrayLayers = 1;
        img_info.format = velocity_format;
        img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
        img_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        img_info.usage = c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
        img_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
        img_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;

        try Utils.checkVk(c.vkCreateImage(vk, &img_info, null, &ctx.velocity_image));

        var mem_reqs: c.VkMemoryRequirements = undefined;
        c.vkGetImageMemoryRequirements(vk, ctx.velocity_image, &mem_reqs);

        var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
        alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = mem_reqs.size;
        alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

        try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, &ctx.velocity_memory));
        try Utils.checkVk(c.vkBindImageMemory(vk, ctx.velocity_image, ctx.velocity_memory, 0));

        var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
        view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        view_info.image = ctx.velocity_image;
        view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
        view_info.format = velocity_format;
        view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };

        try Utils.checkVk(c.vkCreateImageView(vk, &view_info, null, &ctx.velocity_view));
    }

    // 4. Create G-Pass depth image (separate from MSAA depth, 1x sampled for SSAO)
    {
        var img_info = std.mem.zeroes(c.VkImageCreateInfo);
        img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        img_info.imageType = c.VK_IMAGE_TYPE_2D;
        img_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
        img_info.mipLevels = 1;
        img_info.arrayLayers = 1;
        img_info.format = DEPTH_FORMAT;
        img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
        img_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        img_info.usage = c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
        img_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
        img_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;

        try Utils.checkVk(c.vkCreateImage(vk, &img_info, null, &ctx.g_depth_image));

        var mem_reqs: c.VkMemoryRequirements = undefined;
        c.vkGetImageMemoryRequirements(vk, ctx.g_depth_image, &mem_reqs);

        var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
        alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = mem_reqs.size;
        alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

        try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, &ctx.g_depth_memory));
        try Utils.checkVk(c.vkBindImageMemory(vk, ctx.g_depth_image, ctx.g_depth_memory, 0));

        var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
        view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        view_info.image = ctx.g_depth_image;
        view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
        view_info.format = DEPTH_FORMAT;
        view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };

        try Utils.checkVk(c.vkCreateImageView(vk, &view_info, null, &ctx.g_depth_view));
    }

    // 5. Create G-Pass framebuffer (3 attachments: normal, velocity, depth)
    {
        const fb_attachments = [_]c.VkImageView{ ctx.g_normal_view, ctx.velocity_view, ctx.g_depth_view };

        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = ctx.g_render_pass;
        fb_info.attachmentCount = 3;
        fb_info.pAttachments = &fb_attachments;
        fb_info.width = extent.width;
        fb_info.height = extent.height;
        fb_info.layers = 1;

        try Utils.checkVk(c.vkCreateFramebuffer(vk, &fb_info, null, &ctx.g_framebuffer));
    }

    // Transition images to shader read layout
    const g_images = [_]c.VkImage{ ctx.g_normal_image, ctx.velocity_image };
    try transitionImagesToShaderRead(ctx, &g_images, false);
    const d_images = [_]c.VkImage{ctx.g_depth_image};
    try transitionImagesToShaderRead(ctx, &d_images, true);

    // Store the extent we created resources with for mismatch detection
    ctx.g_pass_extent = extent;
    std.log.info("G-Pass resources created ({}x{}) with velocity buffer", .{ extent.width, extent.height });
}

/// Creates SSAO resources: render pass, AO image, noise texture, kernel UBO, framebuffer, pipeline.
/// Also registers the blurred AO result as a native texture and patches binding 10
/// of the main and LOD descriptor sets for every frame in flight.
fn createSSAOResources(ctx: *VulkanContext) !void {
    const extent = ctx.swapchain.getExtent();
    try ctx.ssao_system.init(
        &ctx.vulkan_device,
        ctx.allocator,
        ctx.descriptors.descriptor_pool,
        ctx.frames.command_pool,
        extent.width,
        extent.height,
        ctx.g_normal_view,
        ctx.g_depth_view,
    );

    // Register SSAO result for main pass
    ctx.bound_ssao_handle = try ctx.resources.registerNativeTexture(
        ctx.ssao_system.blur_image,
        ctx.ssao_system.blur_view,
        ctx.ssao_system.sampler,
        extent.width,
        extent.height,
        .red,
    );

    // Update main descriptor sets with SSAO map (Binding 10)
    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        var main_ssao_info = c.VkDescriptorImageInfo{
            .sampler = ctx.ssao_system.sampler,
            .imageView = ctx.ssao_system.blur_view,
            .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        };
        var main_ssao_write = std.mem.zeroes(c.VkWriteDescriptorSet);
        main_ssao_write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        main_ssao_write.dstSet = ctx.descriptors.descriptor_sets[i];
        main_ssao_write.dstBinding = 10;
        main_ssao_write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        main_ssao_write.descriptorCount = 1;
        main_ssao_write.pImageInfo = &main_ssao_info;
        c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &main_ssao_write, 0, null);

        // Also update LOD descriptor sets (same write, different destination set)
        main_ssao_write.dstSet = ctx.descriptors.lod_descriptor_sets[i];
        c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &main_ssao_write, 0, null);
    }

    // Transition SSAO images to SHADER_READ_ONLY_OPTIMAL.
    // This is needed because if SSAO is disabled, the pass is skipped,
    // but the terrain shader still samples the (undefined) texture.
    const ssao_images = [_]c.VkImage{ ctx.ssao_system.image, ctx.ssao_system.blur_image };
    try transitionImagesToShaderRead(ctx, &ssao_images, false);
}

/// (Re)creates the main HDR framebuffer for the current swapchain extent,
/// choosing the MSAA (3-attachment) or single-sampled (2-attachment) layout
/// to match `ctx.hdr_render_pass`. Destroys any previous framebuffer first.
fn createMainFramebuffers(ctx: *VulkanContext) !void {
    const use_msaa = ctx.msaa_samples > 1;
    const extent = ctx.swapchain.getExtent();

    var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
    fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fb_info.renderPass = ctx.hdr_render_pass;
    fb_info.width = extent.width;
    fb_info.height = extent.height;
    fb_info.layers = 1;

    // Destroy old framebuffer if it exists
    if (ctx.main_framebuffer != null) {
        c.vkDestroyFramebuffer(ctx.vulkan_device.vk_device, ctx.main_framebuffer, null);
        ctx.main_framebuffer = null;
    }

    if (use_msaa) {
        std.log.info("Creating MSAA framebuffers with {} samples", .{ctx.msaa_samples});
        // [MSAA Color, MSAA Depth, Resolve HDR]
        const attachments = [_]c.VkImageView{ ctx.hdr_msaa_view, ctx.swapchain.swapchain.depth_image_view, ctx.hdr_view };
        fb_info.attachmentCount = 3;
        fb_info.pAttachments = &attachments[0];
        try Utils.checkVk(c.vkCreateFramebuffer(ctx.vulkan_device.vk_device, &fb_info, null, &ctx.main_framebuffer));
    } else {
        // [HDR Color, Depth]
        const attachments = [_]c.VkImageView{ ctx.hdr_view, ctx.swapchain.swapchain.depth_image_view };
        fb_info.attachmentCount = 2;
        fb_info.pAttachments = &attachments[0];
        try Utils.checkVk(c.vkCreateFramebuffer(ctx.vulkan_device.vk_device, &fb_info, null, &ctx.main_framebuffer));
    }
}

fn createMainPipelines(ctx: *VulkanContext) !void {
    // Use common multisampling and viewport state
    const sample_count = getMSAASampleCountFlag(ctx.msaa_samples);

    var viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo);
    viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewport_state.viewportCount = 1;
    viewport_state.scissorCount = 1;

    const dynamic_states = [_]c.VkDynamicState{
c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR }; - var dynamic_state = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); - dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; - dynamic_state.dynamicStateCount = 2; - dynamic_state.pDynamicStates = &dynamic_states; - - var input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); - input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; - input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; - - var rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); - rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; - rasterizer.lineWidth = 1.0; - rasterizer.cullMode = c.VK_CULL_MODE_NONE; - rasterizer.frontFace = c.VK_FRONT_FACE_CLOCKWISE; - - var multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); - - multisampling.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; - multisampling.rasterizationSamples = sample_count; - - var depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo); - depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; - depth_stencil.depthTestEnable = c.VK_TRUE; - depth_stencil.depthWriteEnable = c.VK_TRUE; - depth_stencil.depthCompareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL; - - var color_blend_attachment = std.mem.zeroes(c.VkPipelineColorBlendAttachmentState); - color_blend_attachment.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; - - var ui_color_blend_attachment = color_blend_attachment; - ui_color_blend_attachment.blendEnable = c.VK_TRUE; - ui_color_blend_attachment.srcColorBlendFactor = c.VK_BLEND_FACTOR_SRC_ALPHA; - ui_color_blend_attachment.dstColorBlendFactor = c.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - ui_color_blend_attachment.colorBlendOp = c.VK_BLEND_OP_ADD; - 
ui_color_blend_attachment.srcAlphaBlendFactor = c.VK_BLEND_FACTOR_ONE; - ui_color_blend_attachment.dstAlphaBlendFactor = c.VK_BLEND_FACTOR_ZERO; - ui_color_blend_attachment.alphaBlendOp = c.VK_BLEND_OP_ADD; - - var ui_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); - ui_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - ui_color_blending.attachmentCount = 1; - ui_color_blending.pAttachments = &ui_color_blend_attachment; - - var terrain_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); - terrain_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - terrain_color_blending.attachmentCount = 1; - terrain_color_blending.pAttachments = &color_blend_attachment; - - // Terrain Pipeline - { - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.TERRAIN_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.TERRAIN_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, - }; - const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(rhi.Vertex), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - 
var attribute_descriptions: [8]c.VkVertexInputAttributeDescription = undefined; - attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 0 }; - attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 3 * 4 }; - attribute_descriptions[2] = .{ .binding = 0, .location = 2, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 6 * 4 }; - attribute_descriptions[3] = .{ .binding = 0, .location = 3, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 9 * 4 }; - attribute_descriptions[4] = .{ .binding = 0, .location = 4, .format = c.VK_FORMAT_R32_SFLOAT, .offset = 11 * 4 }; - attribute_descriptions[5] = .{ .binding = 0, .location = 5, .format = c.VK_FORMAT_R32_SFLOAT, .offset = 12 * 4 }; - attribute_descriptions[6] = .{ .binding = 0, .location = 6, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 13 * 4 }; - attribute_descriptions[7] = .{ .binding = 0, .location = 7, .format = c.VK_FORMAT_R32_SFLOAT, .offset = 16 * 4 }; // AO - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_info.vertexBindingDescriptionCount = 1; - vertex_input_info.pVertexBindingDescriptions = &binding_description; - vertex_input_info.vertexAttributeDescriptionCount = 8; - vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = &vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = 
&depth_stencil; - pipeline_info.pColorBlendState = &terrain_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.pipeline_layout; - pipeline_info.renderPass = ctx.hdr_render_pass; - pipeline_info.subpass = 0; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.pipeline)); - - // Wireframe (No culling) - var wireframe_rasterizer = rasterizer; - wireframe_rasterizer.cullMode = c.VK_CULL_MODE_NONE; - wireframe_rasterizer.polygonMode = c.VK_POLYGON_MODE_LINE; - pipeline_info.pRasterizationState = &wireframe_rasterizer; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.wireframe_pipeline)); - - // Selection (Wireframe on HDR pass) - var selection_rasterizer = rasterizer; - selection_rasterizer.cullMode = c.VK_CULL_MODE_NONE; - selection_rasterizer.polygonMode = c.VK_POLYGON_MODE_FILL; // Use fill since vertices are quads - var selection_pipeline_info = pipeline_info; - selection_pipeline_info.pRasterizationState = &selection_rasterizer; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &selection_pipeline_info, null, &ctx.selection_pipeline)); - - // Line Pipeline - var line_input_assembly = input_assembly; - line_input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_LINE_LIST; - var line_pipeline_info = pipeline_info; - line_pipeline_info.pInputAssemblyState = &line_input_assembly; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &line_pipeline_info, null, &ctx.line_pipeline)); - - // 1.5 G-Pass Pipeline (1-sample, 2 color attachments: normal, velocity) - { - const g_frag_code = try std.fs.cwd().readFileAlloc(shader_registry.G_PASS_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(g_frag_code); - const g_frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, g_frag_code); - defer 
c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, g_frag_module, null); - - var g_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = g_frag_module, .pName = "main" }, - }; - - var g_color_blend_attachments = [_]c.VkPipelineColorBlendAttachmentState{ - color_blend_attachment, // Normal - color_blend_attachment, // Velocity - }; - var g_color_blending = terrain_color_blending; - g_color_blending.attachmentCount = 2; - g_color_blending.pAttachments = &g_color_blend_attachments[0]; - - var g_multisampling = multisampling; - g_multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; - - var g_pipeline_info = pipeline_info; - g_pipeline_info.stageCount = 2; - g_pipeline_info.pStages = &g_shader_stages[0]; - g_pipeline_info.pMultisampleState = &g_multisampling; - g_pipeline_info.pColorBlendState = &g_color_blending; - g_pipeline_info.renderPass = ctx.g_render_pass; - g_pipeline_info.subpass = 0; - - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &g_pipeline_info, null, &ctx.g_pipeline)); - } - } - - // Sky - { - rasterizer.cullMode = c.VK_CULL_MODE_NONE; - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.SKY_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.SKY_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer 
c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, - }; - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - var sky_depth_stencil = depth_stencil; - sky_depth_stencil.depthWriteEnable = c.VK_FALSE; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = &vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = &sky_depth_stencil; - pipeline_info.pColorBlendState = &terrain_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.sky_pipeline_layout; - pipeline_info.renderPass = ctx.hdr_render_pass; - pipeline_info.subpass = 0; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.sky_pipeline)); - } - - // UI - { - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try 
Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, - }; - const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 6 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; - attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; - attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 2 * 4 }; - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_info.vertexBindingDescriptionCount = 1; - vertex_input_info.pVertexBindingDescriptions = &binding_description; - vertex_input_info.vertexAttributeDescriptionCount = 2; - vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; - var ui_depth_stencil = depth_stencil; - ui_depth_stencil.depthTestEnable = c.VK_FALSE; - ui_depth_stencil.depthWriteEnable = c.VK_FALSE; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = 
&vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = &ui_depth_stencil; - pipeline_info.pColorBlendState = &ui_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.ui_pipeline_layout; - pipeline_info.renderPass = ctx.hdr_render_pass; - pipeline_info.subpass = 0; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.ui_pipeline)); - - // Textured UI - const tex_vert_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_TEX_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(tex_vert_code); - const tex_frag_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_TEX_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(tex_frag_code); - const tex_vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, tex_vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, tex_vert_module, null); - const tex_frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, tex_frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, tex_frag_module, null); - var tex_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = tex_vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = tex_frag_module, .pName = "main" }, - }; - pipeline_info.pStages = &tex_shader_stages[0]; - pipeline_info.layout = ctx.ui_tex_pipeline_layout; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.ui_tex_pipeline)); - } - - // 
Debug Shadow - if (comptime build_options.debug_shadows) { - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.DEBUG_SHADOW_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.DEBUG_SHADOW_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, - }; - const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 4 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; - attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; - attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 2 * 4 }; - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_info.vertexBindingDescriptionCount = 1; - vertex_input_info.pVertexBindingDescriptions = &binding_description; - vertex_input_info.vertexAttributeDescriptionCount = 2; - vertex_input_info.pVertexAttributeDescriptions = 
&attribute_descriptions[0]; - var ui_depth_stencil = depth_stencil; - ui_depth_stencil.depthTestEnable = c.VK_FALSE; - ui_depth_stencil.depthWriteEnable = c.VK_FALSE; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = &vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = &ui_depth_stencil; - pipeline_info.pColorBlendState = &ui_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.debug_shadow.pipeline_layout orelse return error.InitializationFailed; - pipeline_info.renderPass = ctx.hdr_render_pass; - pipeline_info.subpass = 0; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.debug_shadow.pipeline)); - } - - // Cloud - { - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.CLOUD_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.CLOUD_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = 
vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, - }; - const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 2 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - var attribute_descriptions: [1]c.VkVertexInputAttributeDescription = undefined; - attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_info.vertexBindingDescriptionCount = 1; - vertex_input_info.pVertexBindingDescriptions = &binding_description; - vertex_input_info.vertexAttributeDescriptionCount = 1; - vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; - var cloud_depth_stencil = depth_stencil; - cloud_depth_stencil.depthWriteEnable = c.VK_FALSE; - var cloud_rasterizer = rasterizer; - cloud_rasterizer.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = &vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &cloud_rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = &cloud_depth_stencil; - pipeline_info.pColorBlendState = &ui_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.cloud_pipeline_layout; - pipeline_info.renderPass = ctx.hdr_render_pass; - pipeline_info.subpass = 0; - try 
Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.cloud_pipeline)); - } -} - -fn createSwapchainUIPipelines(ctx: *VulkanContext) !void { - if (ctx.ui_swapchain_render_pass == null) return error.InitializationFailed; - - destroySwapchainUIPipelines(ctx); - errdefer destroySwapchainUIPipelines(ctx); - - var viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo); - viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; - viewport_state.viewportCount = 1; - viewport_state.scissorCount = 1; - - const dynamic_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR }; - var dynamic_state = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); - dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; - dynamic_state.dynamicStateCount = 2; - dynamic_state.pDynamicStates = &dynamic_states; - - var input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); - input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; - input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; - - var rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); - rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; - rasterizer.lineWidth = 1.0; - rasterizer.cullMode = c.VK_CULL_MODE_NONE; - rasterizer.frontFace = c.VK_FRONT_FACE_CLOCKWISE; - - var multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); - multisampling.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; - multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; - - var depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo); - depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; - depth_stencil.depthTestEnable = c.VK_FALSE; - depth_stencil.depthWriteEnable = c.VK_FALSE; - - var ui_color_blend_attachment = 
std.mem.zeroes(c.VkPipelineColorBlendAttachmentState); - ui_color_blend_attachment.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; - ui_color_blend_attachment.blendEnable = c.VK_TRUE; - ui_color_blend_attachment.srcColorBlendFactor = c.VK_BLEND_FACTOR_SRC_ALPHA; - ui_color_blend_attachment.dstColorBlendFactor = c.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - ui_color_blend_attachment.colorBlendOp = c.VK_BLEND_OP_ADD; - ui_color_blend_attachment.srcAlphaBlendFactor = c.VK_BLEND_FACTOR_ONE; - ui_color_blend_attachment.dstAlphaBlendFactor = c.VK_BLEND_FACTOR_ZERO; - ui_color_blend_attachment.alphaBlendOp = c.VK_BLEND_OP_ADD; - - var ui_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); - ui_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - ui_color_blending.attachmentCount = 1; - ui_color_blending.pAttachments = &ui_color_blend_attachment; - - // UI - { - const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_VERT, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(vert_code); - const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(frag_code); - const vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, vert_module, null); - const frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, frag_module, null); - var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = 
frag_module, .pName = "main" }, - }; - const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 6 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; - var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; - attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; - attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 2 * 4 }; - var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); - vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_info.vertexBindingDescriptionCount = 1; - vertex_input_info.pVertexBindingDescriptions = &binding_description; - vertex_input_info.vertexAttributeDescriptionCount = 2; - vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; - var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); - pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = 2; - pipeline_info.pStages = &shader_stages[0]; - pipeline_info.pVertexInputState = &vertex_input_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pDepthStencilState = &depth_stencil; - pipeline_info.pColorBlendState = &ui_color_blending; - pipeline_info.pDynamicState = &dynamic_state; - pipeline_info.layout = ctx.ui_pipeline_layout; - pipeline_info.renderPass = ctx.ui_swapchain_render_pass; - pipeline_info.subpass = 0; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.ui_swapchain_pipeline)); - - // Textured UI - const tex_vert_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_TEX_VERT, ctx.allocator, @enumFromInt(1024 * 
1024)); - defer ctx.allocator.free(tex_vert_code); - const tex_frag_code = try std.fs.cwd().readFileAlloc(shader_registry.UI_TEX_FRAG, ctx.allocator, @enumFromInt(1024 * 1024)); - defer ctx.allocator.free(tex_frag_code); - const tex_vert_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, tex_vert_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, tex_vert_module, null); - const tex_frag_module = try Utils.createShaderModule(ctx.vulkan_device.vk_device, tex_frag_code); - defer c.vkDestroyShaderModule(ctx.vulkan_device.vk_device, tex_frag_module, null); - var tex_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = tex_vert_module, .pName = "main" }, - .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = tex_frag_module, .pName = "main" }, - }; - pipeline_info.pStages = &tex_shader_stages[0]; - pipeline_info.layout = ctx.ui_tex_pipeline_layout; - pipeline_info.renderPass = ctx.ui_swapchain_render_pass; - try Utils.checkVk(c.vkCreateGraphicsPipelines(ctx.vulkan_device.vk_device, null, 1, &pipeline_info, null, &ctx.ui_swapchain_tex_pipeline)); - } -} - -fn destroyMainRenderPassAndPipelines(ctx: *VulkanContext) void { - if (ctx.vulkan_device.vk_device == null) return; - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - - if (ctx.main_framebuffer != null) { - c.vkDestroyFramebuffer(ctx.vulkan_device.vk_device, ctx.main_framebuffer, null); - ctx.main_framebuffer = null; - } - - if (ctx.pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline, null); - ctx.pipeline = null; - } - if (ctx.wireframe_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.wireframe_pipeline, null); - ctx.wireframe_pipeline = null; - } - if (ctx.selection_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, 
ctx.selection_pipeline, null); - ctx.selection_pipeline = null; - } - if (ctx.line_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.line_pipeline, null); - ctx.line_pipeline = null; - } - // Note: shadow_pipeline and shadow_render_pass are NOT destroyed here - // because they don't depend on the swapchain or MSAA settings. - - if (ctx.sky_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.sky_pipeline, null); - ctx.sky_pipeline = null; - } - if (ctx.ui_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.ui_pipeline, null); - ctx.ui_pipeline = null; - } - if (ctx.ui_tex_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.ui_tex_pipeline, null); - ctx.ui_tex_pipeline = null; - } - if (comptime build_options.debug_shadows) { - if (ctx.debug_shadow.pipeline) |pipeline| c.vkDestroyPipeline(ctx.vulkan_device.vk_device, pipeline, null); - ctx.debug_shadow.pipeline = null; - } - - if (ctx.cloud_pipeline != null) { - c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.cloud_pipeline, null); - ctx.cloud_pipeline = null; - } - if (ctx.hdr_render_pass != null) { - c.vkDestroyRenderPass(ctx.vulkan_device.vk_device, ctx.hdr_render_pass, null); - ctx.hdr_render_pass = null; - } -} +const frame_orchestration = @import("vulkan/rhi_frame_orchestration.zig"); +const pass_orchestration = @import("vulkan/rhi_pass_orchestration.zig"); +const draw_submission = @import("vulkan/rhi_draw_submission.zig"); +const ui_submission = @import("vulkan/rhi_ui_submission.zig"); +const timing = @import("vulkan/rhi_timing.zig"); +const context_factory = @import("vulkan/rhi_context_factory.zig"); +const state_control = @import("vulkan/rhi_state_control.zig"); +const shadow_bridge = @import("vulkan/rhi_shadow_bridge.zig"); +const native_access = @import("vulkan/rhi_native_access.zig"); +const render_state = @import("vulkan/rhi_render_state.zig"); +const init_deinit = @import("vulkan/rhi_init_deinit.zig"); 
+const rhi_timing = @import("vulkan/rhi_timing.zig"); + +const QUERY_COUNT_PER_FRAME = rhi_timing.QUERY_COUNT_PER_FRAME; + +const VulkanContext = @import("vulkan/rhi_context_types.zig").VulkanContext; fn initContext(ctx_ptr: *anyopaque, allocator: std.mem.Allocator, render_device: ?*RenderDevice) anyerror!void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - // Ensure we cleanup everything on error - errdefer deinit(ctx_ptr); - - ctx.allocator = allocator; - ctx.render_device = render_device; - - ctx.vulkan_device = try VulkanDevice.init(allocator, ctx.window); - ctx.vulkan_device.initDebugMessenger(); - ctx.resources = try ResourceManager.init(allocator, &ctx.vulkan_device); - ctx.frames = try FrameManager.init(&ctx.vulkan_device); - ctx.swapchain = try SwapchainPresenter.init(allocator, &ctx.vulkan_device, ctx.window, ctx.msaa_samples); - ctx.descriptors = try DescriptorManager.init(allocator, &ctx.vulkan_device, &ctx.resources); - - ctx.shadow_system = try ShadowSystem.init(allocator, ctx.shadow_resolution); - - // Initialize defaults - ctx.dummy_shadow_image = null; - ctx.dummy_shadow_memory = null; - ctx.dummy_shadow_view = null; - ctx.clear_color = .{ 0.07, 0.08, 0.1, 1.0 }; - ctx.frames.frame_in_progress = false; - ctx.main_pass_active = false; - ctx.shadow_system.pass_active = false; - ctx.shadow_system.pass_index = 0; - ctx.ui_in_progress = false; - ctx.ui_mapped_ptr = null; - ctx.ui_vertex_offset = 0; - - // Optimization state tracking - ctx.terrain_pipeline_bound = false; - ctx.shadow_system.pipeline_bound = false; - ctx.descriptors_updated = false; - ctx.bound_texture = 0; - ctx.bound_normal_texture = 0; - ctx.bound_roughness_texture = 0; - ctx.bound_displacement_texture = 0; - ctx.bound_env_texture = 0; - ctx.current_mask_radius = 0; - ctx.lod_mode = false; - ctx.pending_instance_buffer = 0; - ctx.pending_lod_instance_buffer = 0; - - // Rendering options - ctx.wireframe_enabled = false; - ctx.textures_enabled = true; - ctx.vsync_enabled = 
true; - ctx.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; - - const safe_mode_env = std.posix.getenv("ZIGCRAFT_SAFE_MODE"); - ctx.safe_mode = if (safe_mode_env) |val| - !(std.mem.eql(u8, val, "0") or std.mem.eql(u8, val, "false")) - else - false; - if (ctx.safe_mode) { - std.log.warn("ZIGCRAFT_SAFE_MODE enabled: throttling uploads and forcing GPU idle each frame", .{}); - } - - // Pipeline Layouts (using DescriptorManager's layout) - var model_push_constant = std.mem.zeroes(c.VkPushConstantRange); - model_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT; - model_push_constant.size = 256; - var pipeline_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); - pipeline_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - pipeline_layout_info.setLayoutCount = 1; - pipeline_layout_info.pSetLayouts = &ctx.descriptors.descriptor_set_layout; - pipeline_layout_info.pushConstantRangeCount = 1; - pipeline_layout_info.pPushConstantRanges = &model_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &pipeline_layout_info, null, &ctx.pipeline_layout)); - - var sky_push_constant = std.mem.zeroes(c.VkPushConstantRange); - sky_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT; - sky_push_constant.size = 128; - var sky_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); - sky_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - sky_layout_info.setLayoutCount = 1; - sky_layout_info.pSetLayouts = &ctx.descriptors.descriptor_set_layout; - sky_layout_info.pushConstantRangeCount = 1; - sky_layout_info.pPushConstantRanges = &sky_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &sky_layout_info, null, &ctx.sky_pipeline_layout)); - - var ui_push_constant = std.mem.zeroes(c.VkPushConstantRange); - ui_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT; - ui_push_constant.size = 
@sizeOf(Mat4); - var ui_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); - ui_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - ui_layout_info.pushConstantRangeCount = 1; - ui_layout_info.pPushConstantRanges = &ui_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &ui_layout_info, null, &ctx.ui_pipeline_layout)); - - // UI Tex Pipeline Layout - needs a separate descriptor layout for texture only? - // rhi_vulkan.zig created `ui_tex_descriptor_set_layout` locally. - // I should move that to DescriptorManager too? Or keep it local? - // It's local to UI. DescriptorManager handles the *Main* descriptor set. - // I'll recreate it here locally as it was. - var ui_tex_layout_bindings = [_]c.VkDescriptorSetLayoutBinding{ - .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, - }; - var ui_tex_layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); - ui_tex_layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; - ui_tex_layout_info.bindingCount = 1; - ui_tex_layout_info.pBindings = &ui_tex_layout_bindings[0]; - try Utils.checkVk(c.vkCreateDescriptorSetLayout(ctx.vulkan_device.vk_device, &ui_tex_layout_info, null, &ctx.ui_tex_descriptor_set_layout)); - - // Also need to create the pool for UI tex descriptors? - // Original code created `ui_tex_descriptor_pool` logic... wait, where is it? - // It seems original code initialized `ui_tex_descriptor_pool` in the loop at the end of initContext. - // I need to allocate that pool. 
- var ui_pool_sizes = [_]c.VkDescriptorPoolSize{ - .{ .type = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = MAX_FRAMES_IN_FLIGHT * 64 }, - }; - var ui_pool_info = std.mem.zeroes(c.VkDescriptorPoolCreateInfo); - ui_pool_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; - ui_pool_info.poolSizeCount = 1; - ui_pool_info.pPoolSizes = &ui_pool_sizes[0]; - ui_pool_info.maxSets = MAX_FRAMES_IN_FLIGHT * 64; - // We don't have a field for this pool in VulkanContext? - // Ah, `ui_tex_descriptor_pool` is an array of sets `[MAX_FRAMES][64]VkDescriptorSet`. - // The pool must be `descriptor_pool` or similar? - // Original code used `ctx.descriptors.descriptor_pool`? No, that was for main sets. - // Actually, original code didn't show creation of a separate pool for UI. - // Let me check `initContext` again. - // Line 1997: `ctx.descriptors.descriptor_pool` created. - // Line 2027: `ctx.ui_tex_descriptor_set_layout` created. - // UI descriptors are allocated in `drawTexture2D`. - // They are allocated from `ctx.descriptors.descriptor_pool`? - // `drawTexture2D` line 5081 calls `c.vkUpdateDescriptorSets`. It assumes sets are allocated. - // Where are they allocated? - // They are pre-allocated in `initContext`? - // Looking at the end of `initContext` (original): - // It initializes the array `ctx.ui_tex_descriptor_pool` to nulls. - // It doesn't allocate them. - // Wait, `drawTexture2D` allocates them? - // `drawTexture2D` at line 5081 uses `ds`. - // `ds` comes from `ctx.ui_tex_descriptor_pool[frame][idx]`. - // If it's null, it must be allocated. - // But `drawTexture2D` doesn't show allocation logic in the snippet I have (lines 5051+). - // Ah, I missed where they are allocated. - // Maybe they are allocated on demand? - // Let's assume I need to keep `descriptor_pool` large enough for UI too. - // `DescriptorManager` created a pool with 100 sets. That might be too small for UI if UI uses many. - // I should increase `DescriptorManager` pool size. 
- - var ui_tex_layout_full_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); - ui_tex_layout_full_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - ui_tex_layout_full_info.setLayoutCount = 1; - ui_tex_layout_full_info.pSetLayouts = &ctx.ui_tex_descriptor_set_layout; - ui_tex_layout_full_info.pushConstantRangeCount = 1; - ui_tex_layout_full_info.pPushConstantRanges = &ui_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &ui_tex_layout_full_info, null, &ctx.ui_tex_pipeline_layout)); - - if (comptime build_options.debug_shadows) { - var debug_shadow_layout_full_info: c.VkPipelineLayoutCreateInfo = undefined; - @memset(std.mem.asBytes(&debug_shadow_layout_full_info), 0); - debug_shadow_layout_full_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - debug_shadow_layout_full_info.setLayoutCount = 1; - const debug_layout = ctx.debug_shadow.descriptor_set_layout orelse return error.InitializationFailed; - debug_shadow_layout_full_info.pSetLayouts = &debug_layout; - debug_shadow_layout_full_info.pushConstantRangeCount = 1; - debug_shadow_layout_full_info.pPushConstantRanges = &ui_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &debug_shadow_layout_full_info, null, &ctx.debug_shadow.pipeline_layout)); - } - - var cloud_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); - cloud_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - cloud_layout_info.pushConstantRangeCount = 1; - cloud_layout_info.pPushConstantRanges = &sky_push_constant; - try Utils.checkVk(c.vkCreatePipelineLayout(ctx.vulkan_device.vk_device, &cloud_layout_info, null, &ctx.cloud_pipeline_layout)); - - // Shadow Pass (Legacy) - // ... [Copy Shadow Pass creation logic from lines 2114-2285] ... - // NOTE: This logic creates shadow_render_pass, shadow_pipeline, etc. - // I will call a helper function `createShadowResources` which essentially contains that logic. 
- // Wait, `createShadowResources` was not existing in original file, it was inline. - // I should create it to keep initContext clean. - try createShadowResources(ctx); - - // Initial resources - HDR must be created before main render pass (framebuffers use HDR views) - try createHDRResources(ctx); - try createGPassResources(ctx); - try createSSAOResources(ctx); - - // Create main render pass and framebuffers (depends on HDR views) - try createMainRenderPass(ctx); - - // Final Pipelines (depend on main_render_pass) - try createMainPipelines(ctx); - - // Post-process resources (depend on HDR views and post-process render pass) - try createPostProcessResources(ctx); - try createSwapchainUIResources(ctx); - - // Phase 3: FXAA and Bloom resources (depend on post-process sampler and HDR views) - try ctx.fxaa.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.swapchain.getExtent(), ctx.swapchain.getImageFormat(), ctx.post_process_sampler, ctx.swapchain.getImageViews()); - try createSwapchainUIPipelines(ctx); - try ctx.bloom.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.hdr_view, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height, c.VK_FORMAT_R16G16B16A16_SFLOAT); - - // Update post-process descriptor sets to include bloom texture (binding 2) - updatePostProcessDescriptorsWithBloom(ctx); - - // Setup Dummy Textures from DescriptorManager - ctx.dummy_texture = ctx.descriptors.dummy_texture; - ctx.dummy_normal_texture = ctx.descriptors.dummy_normal_texture; - ctx.dummy_roughness_texture = ctx.descriptors.dummy_roughness_texture; - ctx.current_texture = ctx.dummy_texture; - ctx.current_normal_texture = ctx.dummy_normal_texture; - ctx.current_roughness_texture = ctx.dummy_roughness_texture; - ctx.current_displacement_texture = ctx.dummy_roughness_texture; - ctx.current_env_texture = ctx.dummy_texture; - - // Create cloud resources - const cloud_vbo_handle = try ctx.resources.createBuffer(8 * 
@sizeOf(f32), .vertex); - std.log.info("Cloud VBO handle: {}, map count: {}", .{ cloud_vbo_handle, ctx.resources.buffers.count() }); - if (cloud_vbo_handle == 0) { - std.log.err("Failed to create cloud VBO", .{}); - return error.InitializationFailed; - } - const cloud_buf = ctx.resources.buffers.get(cloud_vbo_handle); - if (cloud_buf == null) { - std.log.err("Cloud VBO created but not found in map!", .{}); - return error.InitializationFailed; - } - ctx.cloud_vbo = cloud_buf.?; - - // Create UI VBOs - for (0..MAX_FRAMES_IN_FLIGHT) |i| { - ctx.ui_vbos[i] = try Utils.createVulkanBuffer(&ctx.vulkan_device, 1024 * 1024, c.VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, c.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | c.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); - } - - for (0..MAX_FRAMES_IN_FLIGHT) |i| { - ctx.descriptors_dirty[i] = true; - // Init UI pools - for (0..64) |j| ctx.ui_tex_descriptor_pool[i][j] = null; - ctx.ui_tex_descriptor_next[i] = 0; - } - - try ctx.resources.flushTransfer(); - // Reset to frame 0 after initialization. Dummy textures created at index 1 are safe. 
- ctx.resources.setCurrentFrame(0); - - // Ensure shadow image is in readable layout initially (in case ShadowPass is skipped) - if (ctx.shadow_system.shadow_image != null) { - try transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.shadow_system.shadow_image}, true); - for (0..rhi.SHADOW_CASCADE_COUNT) |i| { - ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - } - } - - // Ensure all images are in shader-read layout initially - { - var list: [32]c.VkImage = undefined; - var count: usize = 0; - // Note: ctx.hdr_msaa_image is transient and not sampled, so it should not be transitioned to SHADER_READ_ONLY_OPTIMAL - const candidates = [_]c.VkImage{ ctx.hdr_image, ctx.g_normal_image, ctx.ssao_system.image, ctx.ssao_system.blur_image, ctx.ssao_system.noise_image, ctx.velocity_image }; - for (candidates) |img| { - if (img != null) { - list[count] = img; - count += 1; - } - } - // Also transition bloom mips - for (ctx.bloom.mip_images) |img| { - if (img != null) { - list[count] = img; - count += 1; - } - } - - if (count > 0) { - transitionImagesToShaderRead(ctx, list[0..count], false) catch |err| std.log.err("Failed to transition images during init: {}", .{err}); - } - - if (ctx.g_depth_image != null) { - transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.g_depth_image}, true) catch |err| std.log.err("Failed to transition G-depth image during init: {}", .{err}); - } - } - - // 11. 
GPU Timing Query Pool - var query_pool_info = std.mem.zeroes(c.VkQueryPoolCreateInfo); - query_pool_info.sType = c.VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; - query_pool_info.queryType = c.VK_QUERY_TYPE_TIMESTAMP; - query_pool_info.queryCount = TOTAL_QUERY_COUNT; - try Utils.checkVk(c.vkCreateQueryPool(ctx.vulkan_device.vk_device, &query_pool_info, null, &ctx.query_pool)); + try init_deinit.initContext(ctx, allocator, render_device); } fn deinit(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.dry_run) { - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - } - - destroyMainRenderPassAndPipelines(ctx); - destroyHDRResources(ctx); - destroyFXAAResources(ctx); - destroyBloomResources(ctx); - destroyVelocityResources(ctx); - destroyPostProcessResources(ctx); - destroyGPassResources(ctx); - - if (ctx.pipeline_layout != null) c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, ctx.pipeline_layout, null); - if (ctx.sky_pipeline_layout != null) c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, ctx.sky_pipeline_layout, null); - if (ctx.ui_pipeline_layout != null) c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, ctx.ui_pipeline_layout, null); - if (ctx.ui_tex_pipeline_layout != null) c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, ctx.ui_tex_pipeline_layout, null); - if (ctx.ui_tex_descriptor_set_layout != null) c.vkDestroyDescriptorSetLayout(ctx.vulkan_device.vk_device, ctx.ui_tex_descriptor_set_layout, null); - if (ctx.post_process_descriptor_set_layout != null) c.vkDestroyDescriptorSetLayout(ctx.vulkan_device.vk_device, ctx.post_process_descriptor_set_layout, null); - if (comptime build_options.debug_shadows) { - if (ctx.debug_shadow.pipeline_layout) |layout| c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, layout, null); - if (ctx.debug_shadow.descriptor_set_layout) |layout| c.vkDestroyDescriptorSetLayout(ctx.vulkan_device.vk_device, layout, null); - } - if (ctx.cloud_pipeline_layout 
!= null) c.vkDestroyPipelineLayout(ctx.vulkan_device.vk_device, ctx.cloud_pipeline_layout, null); - - // Destroy internal buffers and resources - // Helper to destroy raw VulkanBuffers - const device = ctx.vulkan_device.vk_device; - { - if (ctx.model_ubo.buffer != null) c.vkDestroyBuffer(device, ctx.model_ubo.buffer, null); - if (ctx.model_ubo.memory != null) c.vkFreeMemory(device, ctx.model_ubo.memory, null); - - if (ctx.dummy_instance_buffer.buffer != null) c.vkDestroyBuffer(device, ctx.dummy_instance_buffer.buffer, null); - if (ctx.dummy_instance_buffer.memory != null) c.vkFreeMemory(device, ctx.dummy_instance_buffer.memory, null); - - for (ctx.ui_vbos) |buf| { - if (buf.buffer != null) c.vkDestroyBuffer(device, buf.buffer, null); - if (buf.memory != null) c.vkFreeMemory(device, buf.memory, null); - } - } - - if (comptime build_options.debug_shadows) { - if (ctx.debug_shadow.vbo.buffer != null) c.vkDestroyBuffer(device, ctx.debug_shadow.vbo.buffer, null); - if (ctx.debug_shadow.vbo.memory != null) c.vkFreeMemory(device, ctx.debug_shadow.vbo.memory, null); - } - // Note: cloud_vbo is managed by resource manager and destroyed there - - // Destroy dummy textures - ctx.resources.destroyTexture(ctx.dummy_texture); - ctx.resources.destroyTexture(ctx.dummy_normal_texture); - ctx.resources.destroyTexture(ctx.dummy_roughness_texture); - if (ctx.dummy_shadow_view != null) c.vkDestroyImageView(ctx.vulkan_device.vk_device, ctx.dummy_shadow_view, null); - if (ctx.dummy_shadow_image != null) c.vkDestroyImage(ctx.vulkan_device.vk_device, ctx.dummy_shadow_image, null); - if (ctx.dummy_shadow_memory != null) c.vkFreeMemory(ctx.vulkan_device.vk_device, ctx.dummy_shadow_memory, null); - - ctx.shadow_system.deinit(ctx.vulkan_device.vk_device); - - ctx.descriptors.deinit(); - ctx.swapchain.deinit(); - ctx.frames.deinit(); - ctx.resources.deinit(); - - if (ctx.query_pool != null) { - c.vkDestroyQueryPool(ctx.vulkan_device.vk_device, ctx.query_pool, null); - } - - 
ctx.vulkan_device.deinit(); - - ctx.allocator.destroy(ctx); + init_deinit.deinit(ctx); } fn createBuffer(ctx_ptr: *anyopaque, size: usize, usage: rhi.BufferUsage) rhi.RhiError!rhi.BufferHandle { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); @@ -2520,109 +58,17 @@ fn destroyBuffer(ctx_ptr: *anyopaque, handle: rhi.BufferHandle) void { ctx.resources.destroyBuffer(handle); } -fn recreateSwapchainInternal(ctx: *VulkanContext) void { - std.debug.print("recreateSwapchainInternal: starting...\n", .{}); - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - - var w: c_int = 0; - var h: c_int = 0; - _ = c.SDL_GetWindowSizeInPixels(ctx.window, &w, &h); - if (w == 0 or h == 0) { - std.debug.print("recreateSwapchainInternal: window minimized or 0 size, skipping.\n", .{}); - return; - } - - std.debug.print("recreateSwapchainInternal: destroying old resources...\n", .{}); - destroyMainRenderPassAndPipelines(ctx); - destroyHDRResources(ctx); - destroyFXAAResources(ctx); - destroyBloomResources(ctx); - destroyPostProcessResources(ctx); - destroyGPassResources(ctx); - - ctx.main_pass_active = false; - ctx.shadow_system.pass_active = false; - ctx.g_pass_active = false; - ctx.ssao_pass_active = false; - - std.debug.print("recreateSwapchainInternal: swapchain.recreate()...\n", .{}); - ctx.swapchain.recreate() catch |err| { - std.log.err("Failed to recreate swapchain: {}", .{err}); - return; - }; - - // Recreate resources - std.debug.print("recreateSwapchainInternal: recreating resources...\n", .{}); - createHDRResources(ctx) catch |err| std.log.err("Failed to recreate HDR resources: {}", .{err}); - createGPassResources(ctx) catch |err| std.log.err("Failed to recreate G-Pass resources: {}", .{err}); - createSSAOResources(ctx) catch |err| std.log.err("Failed to recreate SSAO resources: {}", .{err}); - createMainRenderPass(ctx) catch |err| std.log.err("Failed to recreate render pass: {}", .{err}); - createMainPipelines(ctx) catch |err| std.log.err("Failed to recreate 
pipelines: {}", .{err}); - createPostProcessResources(ctx) catch |err| std.log.err("Failed to recreate post-process resources: {}", .{err}); - createSwapchainUIResources(ctx) catch |err| std.log.err("Failed to recreate swapchain UI resources: {}", .{err}); - ctx.fxaa.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.swapchain.getExtent(), ctx.swapchain.getImageFormat(), ctx.post_process_sampler, ctx.swapchain.getImageViews()) catch |err| std.log.err("Failed to recreate FXAA resources: {}", .{err}); - createSwapchainUIPipelines(ctx) catch |err| std.log.err("Failed to recreate swapchain UI pipelines: {}", .{err}); - ctx.bloom.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.hdr_view, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height, c.VK_FORMAT_R16G16B16A16_SFLOAT) catch |err| std.log.err("Failed to recreate Bloom resources: {}", .{err}); - updatePostProcessDescriptorsWithBloom(ctx); - - // Ensure all recreated images are in a known layout - { - var list: [32]c.VkImage = undefined; - var count: usize = 0; - // Note: ctx.hdr_msaa_image is transient and not sampled, so it should not be transitioned to SHADER_READ_ONLY_OPTIMAL - const candidates = [_]c.VkImage{ ctx.hdr_image, ctx.g_normal_image, ctx.ssao_system.image, ctx.ssao_system.blur_image, ctx.ssao_system.noise_image, ctx.velocity_image }; - for (candidates) |img| { - if (img != null) { - list[count] = img; - count += 1; - } - } - // Also transition bloom mips - for (ctx.bloom.mip_images) |img| { - if (img != null) { - list[count] = img; - count += 1; - } - } - - if (count > 0) { - transitionImagesToShaderRead(ctx, list[0..count], false) catch |err| std.log.warn("Failed to transition images: {}", .{err}); - } - - if (ctx.g_depth_image != null) { - transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.g_depth_image}, true) catch |err| std.log.warn("Failed to transition G-depth image: {}", .{err}); - } - if (ctx.shadow_system.shadow_image != null) 
{ - transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.shadow_system.shadow_image}, true) catch |err| std.log.warn("Failed to transition Shadow image: {}", .{err}); - for (0..rhi.SHADOW_CASCADE_COUNT) |i| { - ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - } - } - } - - ctx.framebuffer_resized = false; - - ctx.pipeline_rebuild_needed = false; - std.debug.print("recreateSwapchainInternal: done.\n", .{}); -} - -fn recreateSwapchain(ctx: *VulkanContext) void { - ctx.mutex.lock(); - defer ctx.mutex.unlock(); - recreateSwapchainInternal(ctx); -} - fn beginFrame(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - if (ctx.gpu_fault_detected) return; + if (ctx.runtime.gpu_fault_detected) return; if (ctx.frames.frame_in_progress) return; - if (ctx.framebuffer_resized) { + if (ctx.runtime.framebuffer_resized) { std.log.info("beginFrame: triggering recreateSwapchainInternal (resize)", .{}); - recreateSwapchainInternal(ctx); + frame_orchestration.recreateSwapchainInternal(ctx); } if (ctx.resources.transfer_ready) { @@ -2634,206 +80,40 @@ fn beginFrame(ctx_ptr: *anyopaque) void { // Begin frame (acquire image, reset fences/CBs) const frame_started = ctx.frames.beginFrame(&ctx.swapchain) catch |err| { if (err == error.GpuLost) { - ctx.gpu_fault_detected = true; + ctx.runtime.gpu_fault_detected = true; } else { std.log.err("beginFrame failed: {}", .{err}); } - return; - }; - - if (frame_started) { - processTimingResults(ctx); - - const current_frame = ctx.frames.current_frame; - const command_buffer = ctx.frames.command_buffers[current_frame]; - if (ctx.query_pool != null) { - c.vkCmdResetQueryPool(command_buffer, ctx.query_pool, @intCast(current_frame * QUERY_COUNT_PER_FRAME), QUERY_COUNT_PER_FRAME); - } - } - - ctx.resources.setCurrentFrame(ctx.frames.current_frame); - - if (!frame_started) { - return; - } - - applyPendingDescriptorUpdates(ctx, 
ctx.frames.current_frame); - - ctx.draw_call_count = 0; - ctx.main_pass_active = false; - ctx.shadow_system.pass_active = false; - ctx.post_process_ran_this_frame = false; - ctx.fxaa_ran_this_frame = false; - ctx.ui_using_swapchain = false; - - ctx.terrain_pipeline_bound = false; - ctx.shadow_system.pipeline_bound = false; - ctx.descriptors_updated = false; - ctx.bound_texture = 0; - - const command_buffer = ctx.frames.getCurrentCommandBuffer(); - - // Memory barrier for host writes - var mem_barrier = std.mem.zeroes(c.VkMemoryBarrier); - mem_barrier.sType = c.VK_STRUCTURE_TYPE_MEMORY_BARRIER; - mem_barrier.srcAccessMask = c.VK_ACCESS_HOST_WRITE_BIT | c.VK_ACCESS_TRANSFER_WRITE_BIT; - mem_barrier.dstAccessMask = c.VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | c.VK_ACCESS_INDEX_READ_BIT | c.VK_ACCESS_SHADER_READ_BIT | c.VK_ACCESS_INDIRECT_COMMAND_READ_BIT; - c.vkCmdPipelineBarrier( - command_buffer, - c.VK_PIPELINE_STAGE_HOST_BIT | c.VK_PIPELINE_STAGE_TRANSFER_BIT, - c.VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | c.VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | c.VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, - 0, - 1, - &mem_barrier, - 0, - null, - 0, - null, - ); - - ctx.ui_vertex_offset = 0; - ctx.ui_flushed_vertex_count = 0; - ctx.ui_tex_descriptor_next[ctx.frames.current_frame] = 0; - if (comptime build_options.debug_shadows) { - ctx.debug_shadow.descriptor_next[ctx.frames.current_frame] = 0; - } - - // Static descriptor updates (Atlases & Shadow maps) - const cur_tex = ctx.current_texture; - const cur_nor = ctx.current_normal_texture; - const cur_rou = ctx.current_roughness_texture; - const cur_dis = ctx.current_displacement_texture; - const cur_env = ctx.current_env_texture; - - // Check if any texture bindings or shadow views changed since last frame - var needs_update = false; - if (ctx.bound_texture != cur_tex) needs_update = true; - if (ctx.bound_normal_texture != cur_nor) needs_update = true; - if (ctx.bound_roughness_texture != cur_rou) needs_update = true; - if 
(ctx.bound_displacement_texture != cur_dis) needs_update = true; - if (ctx.bound_env_texture != cur_env) needs_update = true; - - for (0..rhi.SHADOW_CASCADE_COUNT) |si| { - if (ctx.bound_shadow_views[si] != ctx.shadow_system.shadow_image_views[si]) needs_update = true; - } - - // Also update if we've cycled back to this frame in flight and haven't updated this set yet - if (needs_update) { - for (0..MAX_FRAMES_IN_FLIGHT) |i| ctx.descriptors_dirty[i] = true; - // Update tracking immediately so next frame doesn't re-trigger a dirty state for all frames - ctx.bound_texture = cur_tex; - ctx.bound_normal_texture = cur_nor; - ctx.bound_roughness_texture = cur_rou; - ctx.bound_displacement_texture = cur_dis; - ctx.bound_env_texture = cur_env; - for (0..rhi.SHADOW_CASCADE_COUNT) |si| ctx.bound_shadow_views[si] = ctx.shadow_system.shadow_image_views[si]; - } - - if (ctx.descriptors_dirty[ctx.frames.current_frame]) { - if (ctx.descriptors.descriptor_sets[ctx.frames.current_frame] == null) { - std.log.err("CRITICAL: Descriptor set for frame {} is NULL!", .{ctx.frames.current_frame}); - return; - } - var writes: [10]c.VkWriteDescriptorSet = undefined; - var write_count: u32 = 0; - var image_infos: [10]c.VkDescriptorImageInfo = undefined; - var info_count: u32 = 0; - - const dummy_tex_entry = ctx.resources.textures.get(ctx.dummy_texture); - - const atlas_slots = [_]struct { handle: rhi.TextureHandle, binding: u32 }{ - .{ .handle = cur_tex, .binding = 1 }, - .{ .handle = cur_nor, .binding = 6 }, - .{ .handle = cur_rou, .binding = 7 }, - .{ .handle = cur_dis, .binding = 8 }, - .{ .handle = cur_env, .binding = 9 }, - }; - - for (atlas_slots) |slot| { - const entry = ctx.resources.textures.get(slot.handle) orelse dummy_tex_entry; - if (entry) |tex| { - image_infos[info_count] = .{ - .sampler = tex.sampler, - .imageView = tex.view, - .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - }; - writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); - 
writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - writes[write_count].dstSet = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - writes[write_count].dstBinding = slot.binding; - writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - writes[write_count].descriptorCount = 1; - writes[write_count].pImageInfo = &image_infos[info_count]; - write_count += 1; - info_count += 1; - } - } - - // Shadows - { - if (ctx.shadow_system.shadow_sampler == null) { - std.log.err("CRITICAL: Shadow sampler is NULL!", .{}); - } - if (ctx.shadow_system.shadow_sampler_regular == null) { - std.log.err("CRITICAL: Shadow regular sampler is NULL!", .{}); - } - if (ctx.shadow_system.shadow_image_view == null) { - std.log.err("CRITICAL: Shadow image view is NULL!", .{}); - } - image_infos[info_count] = .{ - .sampler = ctx.shadow_system.shadow_sampler, - .imageView = ctx.shadow_system.shadow_image_view, - .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - }; - writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); - writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - writes[write_count].dstSet = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - writes[write_count].dstBinding = 3; - writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - writes[write_count].descriptorCount = 1; - writes[write_count].pImageInfo = &image_infos[info_count]; - write_count += 1; - info_count += 1; - - image_infos[info_count] = .{ - .sampler = if (ctx.shadow_system.shadow_sampler_regular != null) ctx.shadow_system.shadow_sampler_regular else ctx.shadow_system.shadow_sampler, - .imageView = ctx.shadow_system.shadow_image_view, - .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - }; - writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); - writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - writes[write_count].dstSet = 
ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - writes[write_count].dstBinding = 4; - writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - writes[write_count].descriptorCount = 1; - writes[write_count].pImageInfo = &image_infos[info_count]; - write_count += 1; - info_count += 1; - } + return; + }; - if (write_count > 0) { - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, write_count, &writes[0], 0, null); + if (frame_started) { + processTimingResults(ctx); - // Also update LOD descriptor sets with the same texture bindings - for (0..write_count) |i| { - writes[i].dstSet = ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame]; - } - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, write_count, &writes[0], 0, null); + const current_frame = ctx.frames.current_frame; + const command_buffer = ctx.frames.command_buffers[current_frame]; + if (ctx.timing.query_pool != null) { + c.vkCmdResetQueryPool(command_buffer, ctx.timing.query_pool, @intCast(current_frame * QUERY_COUNT_PER_FRAME), QUERY_COUNT_PER_FRAME); } + } + + ctx.resources.setCurrentFrame(ctx.frames.current_frame); - ctx.descriptors_dirty[ctx.frames.current_frame] = false; + if (!frame_started) { + return; } - ctx.descriptors_updated = true; + render_state.applyPendingDescriptorUpdates(ctx, ctx.frames.current_frame); + frame_orchestration.prepareFrameState(ctx); } fn abortFrame(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); if (!ctx.frames.frame_in_progress) return; - if (ctx.main_pass_active) endMainPass(ctx_ptr); + if (ctx.runtime.main_pass_active) endMainPass(ctx_ptr); if (ctx.shadow_system.pass_active) endShadowPass(ctx_ptr); - if (ctx.g_pass_active) endGPass(ctx_ptr); + if (ctx.runtime.g_pass_active) endGPass(ctx_ptr); ctx.frames.abortFrame(); @@ -2849,397 +129,60 @@ fn abortFrame(ctx_ptr: *anyopaque) void { c.vkDestroySemaphore(device, ctx.frames.render_finished_semaphores[frame], null); _ = 
c.vkCreateSemaphore(device, &semaphore_info, null, &ctx.frames.render_finished_semaphores[frame]); - ctx.draw_call_count = 0; - ctx.main_pass_active = false; + ctx.runtime.draw_call_count = 0; + ctx.runtime.main_pass_active = false; ctx.shadow_system.pass_active = false; - ctx.g_pass_active = false; - ctx.ssao_pass_active = false; - ctx.descriptors_updated = false; - ctx.bound_texture = 0; -} - -fn beginGPassInternal(ctx: *VulkanContext) void { - if (!ctx.frames.frame_in_progress or ctx.g_pass_active) return; - - // Safety: Skip G-pass if resources are not available - if (ctx.g_render_pass == null or ctx.g_framebuffer == null or ctx.g_pipeline == null) { - std.log.warn("beginGPass: skipping - resources null (rp={}, fb={}, pipeline={})", .{ ctx.g_render_pass != null, ctx.g_framebuffer != null, ctx.g_pipeline != null }); - return; - } - - // Safety: Check for size mismatch between G-pass resources and current swapchain - if (ctx.g_pass_extent.width != ctx.swapchain.getExtent().width or ctx.g_pass_extent.height != ctx.swapchain.getExtent().height) { - std.log.warn("beginGPass: size mismatch! 
G-pass={}x{}, swapchain={}x{} - recreating", .{ ctx.g_pass_extent.width, ctx.g_pass_extent.height, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height }); - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - createGPassResources(ctx) catch |err| { - std.log.err("Failed to recreate G-pass resources: {}", .{err}); - return; - }; - createSSAOResources(ctx) catch |err| { - std.log.err("Failed to recreate SSAO resources: {}", .{err}); - }; - } - - ensureNoRenderPassActiveInternal(ctx); - - ctx.g_pass_active = true; - const current_frame = ctx.frames.current_frame; - const command_buffer = ctx.frames.command_buffers[current_frame]; - - // Debug: check for NULL handles - if (command_buffer == null) std.log.err("CRITICAL: command_buffer is NULL for frame {}", .{current_frame}); - if (ctx.g_render_pass == null) std.log.err("CRITICAL: g_render_pass is NULL", .{}); - if (ctx.g_framebuffer == null) std.log.err("CRITICAL: g_framebuffer is NULL", .{}); - if (ctx.pipeline_layout == null) std.log.err("CRITICAL: pipeline_layout is NULL", .{}); - - var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); - render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - render_pass_info.renderPass = ctx.g_render_pass; - render_pass_info.framebuffer = ctx.g_framebuffer; - render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; - render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); - - // Debug: log extent on first few frames - if (ctx.frame_index < 10) { - // std.log.debug("beginGPass frame {}: extent {}x{} (cb={}, rp={}, fb={})", .{ ctx.frame_index, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height, command_buffer != null, ctx.g_render_pass != null, ctx.g_framebuffer != null }); - } - - var clear_values: [3]c.VkClearValue = undefined; - clear_values[0] = std.mem.zeroes(c.VkClearValue); - clear_values[0].color = .{ .float32 = .{ 0, 0, 0, 1 } }; // Normal - clear_values[1] = std.mem.zeroes(c.VkClearValue); - 
clear_values[1].color = .{ .float32 = .{ 0, 0, 0, 1 } }; // Velocity - clear_values[2] = std.mem.zeroes(c.VkClearValue); - clear_values[2].depthStencil = .{ .depth = 0.0, .stencil = 0 }; // Depth (Reverse-Z) - render_pass_info.clearValueCount = 3; - render_pass_info.pClearValues = &clear_values[0]; - - c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.g_pipeline); - - const viewport = c.VkViewport{ .x = 0, .y = 0, .width = @floatFromInt(ctx.swapchain.getExtent().width), .height = @floatFromInt(ctx.swapchain.getExtent().height), .minDepth = 0, .maxDepth = 1 }; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = ctx.swapchain.getExtent() }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - - const ds = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - if (ds == null) std.log.err("CRITICAL: descriptor_set is NULL for frame {}", .{ctx.frames.current_frame}); - - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, &ds, 0, null); + ctx.runtime.g_pass_active = false; + ctx.runtime.ssao_pass_active = false; + ctx.draw.descriptors_updated = false; + ctx.draw.bound_texture = 0; } fn beginGPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - beginGPassInternal(ctx); -} - -fn endGPassInternal(ctx: *VulkanContext) void { - if (!ctx.g_pass_active) return; - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdEndRenderPass(command_buffer); - ctx.g_pass_active = false; + pass_orchestration.beginGPassInternal(ctx); } fn endGPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - endGPassInternal(ctx); + 
pass_orchestration.endGPassInternal(ctx); } -// Phase 3: FXAA Pass fn beginFXAAPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - beginFXAAPassInternal(ctx); -} - -fn beginFXAAPassInternal(ctx: *VulkanContext) void { - if (!ctx.fxaa.enabled) return; - if (ctx.fxaa.pass_active) return; - if (ctx.fxaa.pipeline == null) return; - if (ctx.fxaa.render_pass == null) return; - - const image_index = ctx.frames.current_image_index; - if (image_index >= ctx.fxaa.framebuffers.items.len) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - const extent = ctx.swapchain.getExtent(); - - // Begin FXAA render pass (outputs to swapchain) - var clear_value = std.mem.zeroes(c.VkClearValue); - clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; - - var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); - rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - rp_begin.renderPass = ctx.fxaa.render_pass; - rp_begin.framebuffer = ctx.fxaa.framebuffers.items[image_index]; - rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; - rp_begin.clearValueCount = 1; - rp_begin.pClearValues = &clear_value; - - c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); - - // Set viewport and scissor - const viewport = c.VkViewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(extent.width), - .height = @floatFromInt(extent.height), - .minDepth = 0.0, - .maxDepth = 1.0, - }; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - - // Bind FXAA pipeline - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.fxaa.pipeline); - - // Bind descriptor set (contains FXAA input texture) - const frame = ctx.frames.current_frame; - c.vkCmdBindDescriptorSets(command_buffer, 
c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.fxaa.pipeline_layout, 0, 1, &ctx.fxaa.descriptor_sets[frame], 0, null); - - // Push FXAA constants - const push = FXAAPushConstants{ - .texel_size = .{ 1.0 / @as(f32, @floatFromInt(extent.width)), 1.0 / @as(f32, @floatFromInt(extent.height)) }, - .fxaa_span_max = 8.0, - .fxaa_reduce_mul = 1.0 / 8.0, - }; - c.vkCmdPushConstants(command_buffer, ctx.fxaa.pipeline_layout, c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(FXAAPushConstants), &push); - - // Draw fullscreen triangle - c.vkCmdDraw(command_buffer, 3, 1, 0, 0); - ctx.draw_call_count += 1; - - ctx.fxaa_ran_this_frame = true; - ctx.fxaa.pass_active = true; -} - -fn beginFXAAPassForUI(ctx: *VulkanContext) void { - if (!ctx.frames.frame_in_progress) return; - if (ctx.fxaa.pass_active) return; - if (ctx.ui_swapchain_render_pass == null) return; - if (ctx.ui_swapchain_framebuffers.items.len == 0) return; - - const image_index = ctx.frames.current_image_index; - if (image_index >= ctx.ui_swapchain_framebuffers.items.len) return; - - ensureNoRenderPassActiveInternal(ctx); - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - const extent = ctx.swapchain.getExtent(); - - var clear_value = std.mem.zeroes(c.VkClearValue); - clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; - - var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); - rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - rp_begin.renderPass = ctx.ui_swapchain_render_pass; - rp_begin.framebuffer = ctx.ui_swapchain_framebuffers.items[image_index]; - rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; - rp_begin.clearValueCount = 1; - rp_begin.pClearValues = &clear_value; - - c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); - - const viewport = c.VkViewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(extent.width), - .height = @floatFromInt(extent.height), - .minDepth = 0.0, - .maxDepth = 1.0, - }; - 
c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - - ctx.fxaa.pass_active = true; + pass_orchestration.beginFXAAPassInternal(ctx); } fn endFXAAPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - endFXAAPassInternal(ctx); -} - -fn endFXAAPassInternal(ctx: *VulkanContext) void { - if (!ctx.fxaa.pass_active) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdEndRenderPass(command_buffer); - - ctx.fxaa.pass_active = false; + pass_orchestration.endFXAAPassInternal(ctx); } -// Phase 3: Bloom Computation fn computeBloom(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - computeBloomInternal(ctx); -} - -fn computeBloomInternal(ctx: *VulkanContext) void { - if (!ctx.bloom.enabled) return; - if (ctx.bloom.downsample_pipeline == null) return; - if (ctx.bloom.upsample_pipeline == null) return; - if (ctx.bloom.render_pass == null) return; - if (ctx.hdr_image == null) return; if (!ctx.frames.frame_in_progress) return; - - // Ensure any active render passes are ended before issuing barriers - ensureNoRenderPassActiveInternal(ctx); + pass_orchestration.ensureNoRenderPassActiveInternal(ctx); const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - const frame = ctx.frames.current_frame; - - // The HDR image is already transitioned to SHADER_READ_ONLY_OPTIMAL by the main render pass (via finalLayout). - // However, we still need a pipeline barrier for memory visibility and to ensure the GPU has finished - // writing to the HDR image before we start downsampling. 
- var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); - barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - barrier.oldLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // Match finalLayout of main pass - barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.image = ctx.hdr_image; - barrier.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 }; - - c.vkCmdPipelineBarrier(command_buffer, c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); - - // Downsample pass: HDR -> mip0 -> ... -> mipN - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.bloom.downsample_pipeline); - - for (0..BLOOM_MIP_COUNT) |i| { - const mip_width = ctx.bloom.mip_widths[i]; - const mip_height = ctx.bloom.mip_heights[i]; - - // Begin render pass for this mip level - var clear_value = std.mem.zeroes(c.VkClearValue); - clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; - - var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); - rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - rp_begin.renderPass = ctx.bloom.render_pass; - rp_begin.framebuffer = ctx.bloom.mip_framebuffers[i]; - rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; - rp_begin.clearValueCount = 1; - rp_begin.pClearValues = &clear_value; - - c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); - - // Set viewport and scissor - const viewport = c.VkViewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(mip_width), - .height = @floatFromInt(mip_height), - .minDepth = 0.0, - .maxDepth = 1.0, - }; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - const scissor = c.VkRect2D{ .offset = .{ .x = 
0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - - // Bind descriptor set (set i samples from source) - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.bloom.pipeline_layout, 0, 1, &ctx.bloom.descriptor_sets[frame][i], 0, null); - - // Source dimensions for texel size - const src_width: f32 = if (i == 0) @floatFromInt(ctx.swapchain.getExtent().width) else @floatFromInt(ctx.bloom.mip_widths[i - 1]); - const src_height: f32 = if (i == 0) @floatFromInt(ctx.swapchain.getExtent().height) else @floatFromInt(ctx.bloom.mip_heights[i - 1]); - - // Push constants with threshold only on first pass - const push = BloomPushConstants{ - .texel_size = .{ 1.0 / src_width, 1.0 / src_height }, - .threshold_or_radius = if (i == 0) ctx.bloom.threshold else 0.0, - .soft_threshold_or_intensity = 0.5, // soft knee - .mip_level = @intCast(i), - }; - c.vkCmdPushConstants(command_buffer, ctx.bloom.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(BloomPushConstants), &push); - - // Draw fullscreen triangle - c.vkCmdDraw(command_buffer, 3, 1, 0, 0); - ctx.draw_call_count += 1; - - c.vkCmdEndRenderPass(command_buffer); - } - - // Upsample pass: Accumulating back up the mip chain - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.bloom.upsample_pipeline); - - // Upsample (BLOOM_MIP_COUNT-1 passes, accumulating into each mip level) - for (0..BLOOM_MIP_COUNT - 1) |pass| { - const target_mip = (BLOOM_MIP_COUNT - 2) - pass; // Target mips: e.g. 
3, 2, 1, 0 if count=5 - const mip_width = ctx.bloom.mip_widths[target_mip]; - const mip_height = ctx.bloom.mip_heights[target_mip]; - - // Begin render pass for target mip level - var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); - rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - rp_begin.renderPass = ctx.bloom.render_pass; - rp_begin.framebuffer = ctx.bloom.mip_framebuffers[target_mip]; - rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; - rp_begin.clearValueCount = 0; // Don't clear, we're blending - - c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); - - // Set viewport and scissor - const viewport = c.VkViewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(mip_width), - .height = @floatFromInt(mip_height), - .minDepth = 0.0, - .maxDepth = 1.0, - }; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - - // Bind descriptor set - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.bloom.pipeline_layout, 0, 1, &ctx.bloom.descriptor_sets[frame][BLOOM_MIP_COUNT + pass], 0, null); - - // Source dimensions for texel size (upsampling from smaller mip) - const src_mip = target_mip + 1; - const src_width: f32 = @floatFromInt(ctx.bloom.mip_widths[src_mip]); - const src_height: f32 = @floatFromInt(ctx.bloom.mip_heights[src_mip]); - - // Push constants - const push = BloomPushConstants{ - .texel_size = .{ 1.0 / src_width, 1.0 / src_height }, - .threshold_or_radius = 1.0, // filter radius - .soft_threshold_or_intensity = ctx.bloom.intensity, - .mip_level = 0, - }; - c.vkCmdPushConstants(command_buffer, ctx.bloom.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(BloomPushConstants), &push); - - // Draw fullscreen 
triangle - c.vkCmdDraw(command_buffer, 3, 1, 0, 0); - ctx.draw_call_count += 1; - - c.vkCmdEndRenderPass(command_buffer); - } - - // Transition HDR image back to color attachment layout - barrier.srcAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - barrier.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - barrier.oldLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - - c.vkCmdPipelineBarrier(command_buffer, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, null, 0, null, 1, &barrier); + ctx.bloom.compute( + command_buffer, + ctx.frames.current_frame, + ctx.hdr.hdr_image, + ctx.swapchain.getExtent(), + &ctx.runtime.draw_call_count, + ); } -// Phase 3: FXAA and Bloom setters fn setFXAA(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.fxaa.enabled = enabled; @@ -3255,46 +198,41 @@ fn setBloomIntensity(ctx_ptr: *anyopaque, intensity: f32) void { ctx.bloom.intensity = intensity; } -fn endFrame(ctx_ptr: *anyopaque) void { +fn setVignetteEnabled(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.mutex.lock(); - defer ctx.mutex.unlock(); - - if (!ctx.frames.frame_in_progress) return; - - if (ctx.main_pass_active) endMainPassInternal(ctx); - if (ctx.shadow_system.pass_active) endShadowPassInternal(ctx); + ctx.post_process_state.vignette_enabled = enabled; +} - // If post-process pass hasn't run (e.g., UI-only screens), we still need to - // transition the swapchain image to PRESENT_SRC_KHR before presenting. - // Run a minimal post-process pass to do this. 
- if (!ctx.post_process_ran_this_frame and ctx.post_process_framebuffers.items.len > 0 and ctx.frames.current_image_index < ctx.post_process_framebuffers.items.len) { - beginPostProcessPassInternal(ctx); - // Draw fullscreen triangle for post-process (tone mapping) - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdDraw(command_buffer, 3, 1, 0, 0); - ctx.draw_call_count += 1; - } - if (ctx.post_process_pass_active) endPostProcessPassInternal(ctx); +fn setVignetteIntensity(ctx_ptr: *anyopaque, intensity: f32) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.post_process_state.vignette_intensity = intensity; +} - // If FXAA is enabled and post-process ran but FXAA hasn't, run FXAA pass - // (Post-process outputs to intermediate texture when FXAA is enabled) - if (ctx.fxaa.enabled and ctx.post_process_ran_this_frame and !ctx.fxaa_ran_this_frame) { - beginFXAAPassInternal(ctx); - } - if (ctx.fxaa.pass_active) endFXAAPassInternal(ctx); +fn setFilmGrainEnabled(ctx_ptr: *anyopaque, enabled: bool) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.post_process_state.film_grain_enabled = enabled; +} - const transfer_cb = ctx.resources.getTransferCommandBuffer(); +fn setFilmGrainIntensity(ctx_ptr: *anyopaque, intensity: f32) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.post_process_state.film_grain_intensity = intensity; +} - ctx.frames.endFrame(&ctx.swapchain, transfer_cb) catch |err| { - std.log.err("endFrame failed: {}", .{err}); - }; +fn setColorGradingEnabled(ctx_ptr: *anyopaque, enabled: bool) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.post_process_state.color_grading_enabled = enabled; +} - if (transfer_cb != null) { - ctx.resources.resetTransferState(); - } +fn setColorGradingIntensity(ctx_ptr: *anyopaque, intensity: f32) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + 
ctx.post_process_state.color_grading_intensity = intensity; +} - ctx.frame_index += 1; +fn endFrame(ctx_ptr: *anyopaque) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.mutex.lock(); + defer ctx.mutex.unlock(); + pass_orchestration.endFrame(ctx); } fn setClearColor(ctx_ptr: *anyopaque, color: Vec3) void { @@ -3302,488 +240,83 @@ fn setClearColor(ctx_ptr: *anyopaque, color: Vec3) void { const r = if (std.math.isFinite(color.x)) color.x else 0.0; const g = if (std.math.isFinite(color.y)) color.y else 0.0; const b = if (std.math.isFinite(color.z)) color.z else 0.0; - ctx.clear_color = .{ r, g, b, 1.0 }; -} - -fn transitionShadowImage(ctx: *VulkanContext, cascade_index: u32, new_layout: c.VkImageLayout) void { - if (cascade_index >= rhi.SHADOW_CASCADE_COUNT) return; - if (ctx.shadow_system.shadow_image == null) return; - - const old_layout = ctx.shadow_system.shadow_image_layouts[cascade_index]; - if (old_layout == new_layout) return; - - var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); - barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = if (new_layout == c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) c.VK_IMAGE_LAYOUT_UNDEFINED else old_layout; - barrier.newLayout = new_layout; - barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.image = ctx.shadow_system.shadow_image; - barrier.subresourceRange.aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT; - barrier.subresourceRange.baseMipLevel = 0; - barrier.subresourceRange.levelCount = 1; - barrier.subresourceRange.baseArrayLayer = @intCast(cascade_index); - barrier.subresourceRange.layerCount = 1; - - var src_stage: c.VkPipelineStageFlags = c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - var dst_stage: c.VkPipelineStageFlags = c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; - - if (new_layout == c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { - barrier.srcAccessMask = 0; - barrier.dstAccessMask = 
c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; - src_stage = c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - dst_stage = c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; - } else if (old_layout == c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL and new_layout == c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { - barrier.srcAccessMask = c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - src_stage = c.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; - dst_stage = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; - } - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdPipelineBarrier(command_buffer, src_stage, dst_stage, 0, 0, null, 0, null, 1, &barrier); - ctx.shadow_system.shadow_image_layouts[cascade_index] = new_layout; -} - -fn beginMainPassInternal(ctx: *VulkanContext) void { - if (!ctx.frames.frame_in_progress) return; - if (ctx.swapchain.getExtent().width == 0 or ctx.swapchain.getExtent().height == 0) return; - - // Safety: Ensure render pass and framebuffer are valid - if (ctx.hdr_render_pass == null) { - std.debug.print("beginMainPass: hdr_render_pass is null, creating...\n", .{}); - createMainRenderPass(ctx) catch |err| { - std.log.err("beginMainPass: failed to recreate render pass: {}", .{err}); - return; - }; - } - if (ctx.main_framebuffer == null) { - std.debug.print("beginMainPass: main_framebuffer is null, creating...\n", .{}); - createMainFramebuffers(ctx) catch |err| { - std.log.err("beginMainPass: failed to recreate framebuffer: {}", .{err}); - return; - }; - } - if (ctx.main_framebuffer == null) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - if (!ctx.main_pass_active) { - ensureNoRenderPassActiveInternal(ctx); - - // Ensure HDR image is in correct layout for resolve - if (ctx.hdr_image != null) { - var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); - barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = 
c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.image = ctx.hdr_image; - barrier.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 }; - barrier.srcAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - barrier.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - - c.vkCmdPipelineBarrier(command_buffer, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, null, 0, null, 1, &barrier); - } - - ctx.terrain_pipeline_bound = false; - - var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); - render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - render_pass_info.renderPass = ctx.hdr_render_pass; - render_pass_info.framebuffer = ctx.main_framebuffer; - render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; - render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); - - var clear_values: [3]c.VkClearValue = undefined; - clear_values[0] = std.mem.zeroes(c.VkClearValue); - clear_values[0].color = .{ .float32 = ctx.clear_color }; - clear_values[1] = std.mem.zeroes(c.VkClearValue); - clear_values[1].depthStencil = .{ .depth = 0.0, .stencil = 0 }; - - if (ctx.msaa_samples > 1) { - clear_values[2] = std.mem.zeroes(c.VkClearValue); - clear_values[2].color = .{ .float32 = ctx.clear_color }; - render_pass_info.clearValueCount = 3; - } else { - render_pass_info.clearValueCount = 2; - } - render_pass_info.pClearValues = &clear_values[0]; - - // std.debug.print("beginMainPass: calling vkCmdBeginRenderPass (cb={}, rp={}, fb={})\n", .{ command_buffer != null, ctx.hdr_render_pass != null, ctx.main_framebuffer != null }); - c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); - ctx.main_pass_active = true; - 
ctx.lod_mode = false; - } - - var viewport = std.mem.zeroes(c.VkViewport); - viewport.x = 0.0; - viewport.y = 0.0; - viewport.width = @floatFromInt(ctx.swapchain.getExtent().width); - viewport.height = @floatFromInt(ctx.swapchain.getExtent().height); - viewport.minDepth = 0.0; - viewport.maxDepth = 1.0; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - var scissor = std.mem.zeroes(c.VkRect2D); - scissor.offset = .{ .x = 0, .y = 0 }; - scissor.extent = ctx.swapchain.getExtent(); - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + ctx.runtime.clear_color = .{ r, g, b, 1.0 }; } fn beginMainPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - beginMainPassInternal(ctx); -} - -fn endMainPassInternal(ctx: *VulkanContext) void { - if (!ctx.main_pass_active) return; - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdEndRenderPass(command_buffer); - ctx.main_pass_active = false; + pass_orchestration.beginMainPassInternal(ctx); } fn endMainPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - endMainPassInternal(ctx); -} - -fn beginPostProcessPassInternal(ctx: *VulkanContext) void { - if (!ctx.frames.frame_in_progress) return; - if (ctx.post_process_framebuffers.items.len == 0) return; - if (ctx.frames.current_image_index >= ctx.post_process_framebuffers.items.len) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - if (!ctx.post_process_pass_active) { - ensureNoRenderPassActiveInternal(ctx); - - // Note: The main render pass already transitions HDR buffer to SHADER_READ_ONLY_OPTIMAL - // via its finalLayout, so no explicit barrier is needed here. 
- - // When FXAA is enabled, render to intermediate texture; otherwise render to swapchain - const use_fxaa_output = ctx.fxaa.enabled and ctx.fxaa.post_process_to_fxaa_render_pass != null and ctx.fxaa.post_process_to_fxaa_framebuffer != null; - - var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); - render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - - if (use_fxaa_output) { - render_pass_info.renderPass = ctx.fxaa.post_process_to_fxaa_render_pass; - render_pass_info.framebuffer = ctx.fxaa.post_process_to_fxaa_framebuffer; - } else { - render_pass_info.renderPass = ctx.post_process_render_pass; - render_pass_info.framebuffer = ctx.post_process_framebuffers.items[ctx.frames.current_image_index]; - } - - render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; - render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); - - var clear_value = std.mem.zeroes(c.VkClearValue); - clear_value.color = .{ .float32 = .{ 0, 0, 0, 1 } }; - render_pass_info.clearValueCount = 1; - render_pass_info.pClearValues = &clear_value; - - c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); - ctx.post_process_pass_active = true; - ctx.post_process_ran_this_frame = true; - - if (ctx.post_process_pipeline == null) { - std.log.err("Post-process pipeline is null, skipping draw", .{}); - return; - } - - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.post_process_pipeline); - - const pp_ds = ctx.post_process_descriptor_sets[ctx.frames.current_frame]; - if (pp_ds == null) { - std.log.err("Post-process descriptor set is null for frame {}", .{ctx.frames.current_frame}); - return; - } - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.post_process_pipeline_layout, 0, 1, &pp_ds, 0, null); - - // Push bloom parameters - const push = PostProcessPushConstants{ - .bloom_enabled = if (ctx.bloom.enabled) 1.0 else 0.0, - .bloom_intensity = ctx.bloom.intensity, - }; - 
c.vkCmdPushConstants(command_buffer, ctx.post_process_pipeline_layout, c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(PostProcessPushConstants), &push); - - var viewport = std.mem.zeroes(c.VkViewport); - viewport.x = 0.0; - viewport.y = 0.0; - viewport.width = @floatFromInt(ctx.swapchain.getExtent().width); - viewport.height = @floatFromInt(ctx.swapchain.getExtent().height); - viewport.minDepth = 0.0; - viewport.maxDepth = 1.0; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - var scissor = std.mem.zeroes(c.VkRect2D); - scissor.offset = .{ .x = 0, .y = 0 }; - scissor.extent = ctx.swapchain.getExtent(); - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); - } + pass_orchestration.endMainPassInternal(ctx); } fn beginPostProcessPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - beginPostProcessPassInternal(ctx); -} - -fn endPostProcessPassInternal(ctx: *VulkanContext) void { - if (!ctx.post_process_pass_active) return; - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdEndRenderPass(command_buffer); - ctx.post_process_pass_active = false; + pass_orchestration.beginPostProcessPassInternal(ctx); } fn endPostProcessPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - endPostProcessPassInternal(ctx); + pass_orchestration.endPostProcessPassInternal(ctx); } fn waitIdle(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.dry_run and ctx.vulkan_device.vk_device != null) { - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - } + state_control.waitIdle(ctx); } fn updateGlobalUniforms(ctx_ptr: *anyopaque, view_proj: Mat4, cam_pos: Vec3, sun_dir: Vec3, sun_color: Vec3, time_val: f32, fog_color: Vec3, fog_density: f32, fog_enabled: bool, sun_intensity: f32, ambient: f32, use_texture: bool, cloud_params: 
rhi.CloudParams) anyerror!void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - - const global_uniforms = GlobalUniforms{ - .view_proj = view_proj, - .view_proj_prev = ctx.view_proj_prev, - .cam_pos = .{ cam_pos.x, cam_pos.y, cam_pos.z, 1.0 }, - .sun_dir = .{ sun_dir.x, sun_dir.y, sun_dir.z, 0.0 }, - .sun_color = .{ sun_color.x, sun_color.y, sun_color.z, 1.0 }, - .fog_color = .{ fog_color.x, fog_color.y, fog_color.z, 1.0 }, - .cloud_wind_offset = .{ cloud_params.wind_offset_x, cloud_params.wind_offset_z, cloud_params.cloud_scale, cloud_params.cloud_coverage }, - .params = .{ time_val, fog_density, if (fog_enabled) 1.0 else 0.0, sun_intensity }, - .lighting = .{ ambient, if (use_texture) 1.0 else 0.0, if (cloud_params.pbr_enabled) 1.0 else 0.0, cloud_params.shadow.distance }, // Use shadow distance as a placeholder for strength if needed - .cloud_params = .{ cloud_params.cloud_height, @floatFromInt(cloud_params.shadow.pcf_samples), if (cloud_params.shadow.cascade_blend) 1.0 else 0.0, if (cloud_params.cloud_shadows) 1.0 else 0.0 }, - .pbr_params = .{ @floatFromInt(cloud_params.pbr_quality), cloud_params.exposure, cloud_params.saturation, if (cloud_params.ssao_enabled) 1.0 else 0.0 }, - .volumetric_params = .{ if (cloud_params.volumetric_enabled) 1.0 else 0.0, cloud_params.volumetric_density, @floatFromInt(cloud_params.volumetric_steps), cloud_params.volumetric_scattering }, - .viewport_size = .{ @floatFromInt(ctx.swapchain.swapchain.extent.width), @floatFromInt(ctx.swapchain.swapchain.extent.height), if (ctx.debug_shadows_active) 1.0 else 0.0, 0.0 }, - }; - - try ctx.descriptors.updateGlobalUniforms(ctx.frames.current_frame, &global_uniforms); - ctx.view_proj_prev = view_proj; + try render_state.updateGlobalUniforms(ctx, view_proj, cam_pos, sun_dir, sun_color, time_val, fog_color, fog_density, fog_enabled, sun_intensity, ambient, use_texture, cloud_params); } fn setModelMatrix(ctx_ptr: *anyopaque, model: Mat4, color: Vec3, mask_radius: f32) void { const 
ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.current_model = model; - ctx.current_color = .{ color.x, color.y, color.z }; - ctx.current_mask_radius = mask_radius; + render_state.setModelMatrix(ctx, model, color, mask_radius); } fn setInstanceBuffer(ctx_ptr: *anyopaque, handle: rhi.BufferHandle) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.pending_instance_buffer = handle; - ctx.lod_mode = false; - applyPendingDescriptorUpdates(ctx, ctx.frames.current_frame); + render_state.setInstanceBuffer(ctx, handle); } fn setLODInstanceBuffer(ctx_ptr: *anyopaque, handle: rhi.BufferHandle) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.pending_lod_instance_buffer = handle; - ctx.lod_mode = true; - applyPendingDescriptorUpdates(ctx, ctx.frames.current_frame); + render_state.setLODInstanceBuffer(ctx, handle); } fn setSelectionMode(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.selection_mode = enabled; -} - -fn applyPendingDescriptorUpdates(ctx: *VulkanContext, frame_index: usize) void { - if (ctx.pending_instance_buffer != 0 and ctx.bound_instance_buffer[frame_index] != ctx.pending_instance_buffer) { - const buf_opt = ctx.resources.buffers.get(ctx.pending_instance_buffer); - - if (buf_opt) |buf| { - var buffer_info = c.VkDescriptorBufferInfo{ - .buffer = buf.buffer, - .offset = 0, - .range = buf.size, - }; - - var write = std.mem.zeroes(c.VkWriteDescriptorSet); - write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - write.dstSet = ctx.descriptors.descriptor_sets[frame_index]; - write.dstBinding = 5; // Instance SSBO - write.descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; - write.descriptorCount = 1; - write.pBufferInfo = &buffer_info; - - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write, 0, null); - ctx.bound_instance_buffer[frame_index] = 
ctx.pending_instance_buffer; - } - } - - if (ctx.pending_lod_instance_buffer != 0 and ctx.bound_lod_instance_buffer[frame_index] != ctx.pending_lod_instance_buffer) { - const buf_opt = ctx.resources.buffers.get(ctx.pending_lod_instance_buffer); - - if (buf_opt) |buf| { - var buffer_info = c.VkDescriptorBufferInfo{ - .buffer = buf.buffer, - .offset = 0, - .range = buf.size, - }; - - var write = std.mem.zeroes(c.VkWriteDescriptorSet); - write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - write.dstSet = ctx.descriptors.lod_descriptor_sets[frame_index]; - write.dstBinding = 5; // Instance SSBO - write.descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; - write.descriptorCount = 1; - write.pBufferInfo = &buffer_info; - - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write, 0, null); - ctx.bound_lod_instance_buffer[frame_index] = ctx.pending_lod_instance_buffer; - } - } + render_state.setSelectionMode(ctx, enabled); } fn setTextureUniforms(ctx_ptr: *anyopaque, texture_enabled: bool, shadow_map_handles: [rhi.SHADOW_CASCADE_COUNT]rhi.TextureHandle) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.textures_enabled = texture_enabled; _ = shadow_map_handles; - // Force descriptor update so internal shadow maps are bound - ctx.descriptors_updated = false; + state_control.setTextureUniforms(ctx, texture_enabled); } fn beginCloudPass(ctx_ptr: *anyopaque, params: rhi.CloudParams) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - - ctx.mutex.lock(); - defer ctx.mutex.unlock(); - - if (!ctx.main_pass_active) beginMainPassInternal(ctx); - if (!ctx.main_pass_active) return; - - // Use dedicated cloud pipeline - if (ctx.cloud_pipeline == null) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - // Bind cloud pipeline - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.cloud_pipeline); - ctx.terrain_pipeline_bound = 
false; - - // CloudPushConstants: mat4 view_proj + 4 vec4s = 128 bytes - const CloudPushConstants = extern struct { - view_proj: [4][4]f32, - camera_pos: [4]f32, // xyz = camera position, w = cloud_height - cloud_params: [4]f32, // x = coverage, y = scale, z = wind_offset_x, w = wind_offset_z - sun_params: [4]f32, // xyz = sun_dir, w = sun_intensity - fog_params: [4]f32, // xyz = fog_color, w = fog_density - }; - - const pc = CloudPushConstants{ - .view_proj = params.view_proj.data, - .camera_pos = .{ params.cam_pos.x, params.cam_pos.y, params.cam_pos.z, params.cloud_height }, - .cloud_params = .{ params.cloud_coverage, params.cloud_scale, params.wind_offset_x, params.wind_offset_z }, - .sun_params = .{ params.sun_dir.x, params.sun_dir.y, params.sun_dir.z, params.sun_intensity }, - .fog_params = .{ params.fog_color.x, params.fog_color.y, params.fog_color.z, params.fog_density }, - }; - - c.vkCmdPushConstants(command_buffer, ctx.cloud_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(CloudPushConstants), &pc); -} - -fn drawDepthTexture(ctx_ptr: *anyopaque, texture: rhi.TextureHandle, rect: rhi.Rect) void { - if (comptime !build_options.debug_shadows) return; - const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress or !ctx.ui_in_progress) return; - - if (ctx.debug_shadow.pipeline == null) return; - - // 1. Flush normal UI if any - flushUI(ctx); - - const tex_opt = ctx.resources.textures.get(texture); - if (tex_opt == null) { - std.log.err("drawDepthTexture: Texture handle {} not found in textures map!", .{texture}); - return; - } - const tex = tex_opt.?; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - // 2. Bind Debug Shadow Pipeline - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.debug_shadow.pipeline.?); - ctx.terrain_pipeline_bound = false; - - // 3. 
Set up orthographic projection for UI-sized quad - const width_f32 = ctx.ui_screen_width; - const height_f32 = ctx.ui_screen_height; - const proj = Mat4.orthographic(0, width_f32, height_f32, 0, -1, 1); - c.vkCmdPushConstants(command_buffer, ctx.debug_shadow.pipeline_layout.?, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); - - // 4. Update & Bind Descriptor Set - var image_info = std.mem.zeroes(c.VkDescriptorImageInfo); - image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - image_info.imageView = tex.view; - image_info.sampler = tex.sampler; - - const frame = ctx.frames.current_frame; - const idx = ctx.debug_shadow.descriptor_next[frame]; - const pool_len = ctx.debug_shadow.descriptor_pool[frame].len; - ctx.debug_shadow.descriptor_next[frame] = @intCast((idx + 1) % pool_len); - const ds = ctx.debug_shadow.descriptor_pool[frame][idx] orelse return; - - var write_set = std.mem.zeroes(c.VkWriteDescriptorSet); - write_set.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - write_set.dstSet = ds; - write_set.dstBinding = 0; - write_set.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - write_set.descriptorCount = 1; - write_set.pImageInfo = &image_info; - - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write_set, 0, null); - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.debug_shadow.pipeline_layout.?, 0, 1, &ds, 0, null); - - // 5. 
Draw Quad - const debug_x = rect.x; - const debug_y = rect.y; - const debug_w = rect.width; - const debug_h = rect.height; - - const debug_vertices = [_]f32{ - // pos.x, pos.y, uv.x, uv.y - debug_x, debug_y, 0.0, 0.0, - debug_x + debug_w, debug_y, 1.0, 0.0, - debug_x + debug_w, debug_y + debug_h, 1.0, 1.0, - debug_x, debug_y, 0.0, 0.0, - debug_x + debug_w, debug_y + debug_h, 1.0, 1.0, - debug_x, debug_y + debug_h, 0.0, 1.0, - }; - - // Use persistently mapped memory if available - if (ctx.debug_shadow.vbo.mapped_ptr) |ptr| { - @memcpy(@as([*]u8, @ptrCast(ptr))[0..@sizeOf(@TypeOf(debug_vertices))], std.mem.asBytes(&debug_vertices)); - - const offset: c.VkDeviceSize = 0; - c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &ctx.debug_shadow.vbo.buffer, &offset); - c.vkCmdDraw(command_buffer, 6, 1, 0, 0); - } + ctx.mutex.lock(); + defer ctx.mutex.unlock(); + render_state.beginCloudPass(ctx, params); +} - // 6. Restore normal UI state for subsequent calls - const restore_pipeline = getUIPipeline(ctx, false); - if (restore_pipeline != null) { - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, restore_pipeline); - c.vkCmdPushConstants(command_buffer, ctx.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); - } +fn drawDepthTexture(ctx_ptr: *anyopaque, texture: rhi.TextureHandle, rect: rhi.Rect) void { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ui_submission.drawDepthTexture(ctx, texture, rect); } fn createTexture(ctx_ptr: *anyopaque, width: u32, height: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) rhi.RhiError!rhi.TextureHandle { @@ -3793,6 +326,13 @@ fn createTexture(ctx_ptr: *anyopaque, width: u32, height: u32, format: rhi.Textu return ctx.resources.createTexture(width, height, format, config, data_opt); } +fn createTexture3D(ctx_ptr: *anyopaque, width: u32, height: u32, depth: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) 
rhi.RhiError!rhi.TextureHandle { + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + ctx.mutex.lock(); + defer ctx.mutex.unlock(); + return ctx.resources.createTexture3D(width, height, depth, format, config, data_opt); +} + fn destroyTexture(ctx_ptr: *anyopaque, handle: rhi.TextureHandle) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.resources.destroyTexture(handle); @@ -3803,20 +343,23 @@ fn bindTexture(ctx_ptr: *anyopaque, handle: rhi.TextureHandle, slot: u32) void { ctx.mutex.lock(); defer ctx.mutex.unlock(); const resolved = if (handle == 0) switch (slot) { - 6 => ctx.dummy_normal_texture, - 7, 8 => ctx.dummy_roughness_texture, - 9 => ctx.dummy_texture, - 0, 1 => ctx.dummy_texture, - else => ctx.dummy_texture, + 6 => ctx.draw.dummy_normal_texture, + 7, 8 => ctx.draw.dummy_roughness_texture, + 9 => ctx.draw.dummy_texture, + 0, 1 => ctx.draw.dummy_texture, + else => ctx.draw.dummy_texture, } else handle; switch (slot) { - 0, 1 => ctx.current_texture = resolved, - 6 => ctx.current_normal_texture = resolved, - 7 => ctx.current_roughness_texture = resolved, - 8 => ctx.current_displacement_texture = resolved, - 9 => ctx.current_env_texture = resolved, - else => ctx.current_texture = resolved, + 0, 1 => ctx.draw.current_texture = resolved, + 6 => ctx.draw.current_normal_texture = resolved, + 7 => ctx.draw.current_roughness_texture = resolved, + 8 => ctx.draw.current_displacement_texture = resolved, + 9 => ctx.draw.current_env_texture = resolved, + 11 => ctx.draw.current_lpv_texture = resolved, + 12 => ctx.draw.current_lpv_texture_g = resolved, + 13 => ctx.draw.current_lpv_texture_b = resolved, + else => ctx.draw.current_texture = resolved, } } @@ -3829,433 +372,103 @@ fn updateTexture(ctx_ptr: *anyopaque, handle: rhi.TextureHandle, data: []const u fn setViewport(ctx_ptr: *anyopaque, width: u32, height: u32) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - // We use the pixel dimensions from SDL to trigger resizes 
correctly on High-DPI - const fb_w = width; - const fb_h = height; - _ = fb_w; - _ = fb_h; - - // Use SDL_GetWindowSizeInPixels to check for actual pixel dimension changes - var w: c_int = 0; - var h: c_int = 0; - _ = c.SDL_GetWindowSizeInPixels(ctx.window, &w, &h); - - if (!ctx.swapchain.skip_present and (@as(u32, @intCast(w)) != ctx.swapchain.getExtent().width or @as(u32, @intCast(h)) != ctx.swapchain.getExtent().height)) { - ctx.framebuffer_resized = true; - } - - if (!ctx.frames.frame_in_progress) return; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - var viewport = std.mem.zeroes(c.VkViewport); - viewport.x = 0.0; - viewport.y = 0.0; - viewport.width = @floatFromInt(width); - viewport.height = @floatFromInt(height); - viewport.minDepth = 0.0; - viewport.maxDepth = 1.0; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - - var scissor = std.mem.zeroes(c.VkRect2D); - scissor.offset = .{ .x = 0, .y = 0 }; - scissor.extent = .{ .width = width, .height = height }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + state_control.setViewport(ctx, width, height); } fn getAllocator(ctx_ptr: *anyopaque) std.mem.Allocator { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.allocator; + return state_control.getAllocator(ctx); } fn getFrameIndex(ctx_ptr: *anyopaque) usize { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intCast(ctx.frames.current_frame); + return state_control.getFrameIndex(ctx); } fn supportsIndirectFirstInstance(ctx_ptr: *anyopaque) bool { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.vulkan_device.draw_indirect_first_instance; + return state_control.supportsIndirectFirstInstance(ctx); } fn recover(ctx_ptr: *anyopaque) anyerror!void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.gpu_fault_detected) return; - - if (ctx.vulkan_device.recovery_count >= ctx.vulkan_device.max_recovery_attempts) { - std.log.err("RHI: Max 
recovery attempts ({d}) exceeded. GPU is unstable.", .{ctx.vulkan_device.max_recovery_attempts}); - return error.GpuLost; - } - - ctx.vulkan_device.recovery_count += 1; - std.log.info("RHI: Attempting GPU recovery (Attempt {d}/{d})...", .{ ctx.vulkan_device.recovery_count, ctx.vulkan_device.max_recovery_attempts }); - - // Best effort: wait for idle - _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); - - // If robustness2 is working, the device might not be "lost" in the Vulkan sense, - // but we might have hit a corner case. - // Full recovery requires recreating the logical device and all resources. - // For now, we reset the flag and recreate the swapchain. - // Limitation: If the device is truly lost (VK_ERROR_DEVICE_LOST returned everywhere), - // this soft recovery will likely fail or loop. Full engine restart is recommended for true TDRs. - // TODO: Implement hard recovery (recreateDevice) which would: - // 1. Destroy logical device and all resources - // 2. Re-initialize device via VulkanDevice.init - // 3. Re-create all RHI resources (buffers, textures, pipelines) - // 4. Restore application state - ctx.gpu_fault_detected = false; - recreateSwapchain(ctx); - - // Basic verification: Check if device is responsive - if (c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device) != c.VK_SUCCESS) { - std.log.err("RHI: Device unresponsive after recovery. Recovery failed.", .{}); - ctx.vulkan_device.recovery_fail_count += 1; - ctx.gpu_fault_detected = true; // Re-flag to prevent further submissions - return error.GpuLost; - } - - ctx.vulkan_device.recovery_success_count += 1; - std.log.info("RHI: Recovery step complete. 
If issues persist, please restart.", .{}); + try state_control.recover(ctx); } fn setWireframe(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (ctx.wireframe_enabled != enabled) { - ctx.wireframe_enabled = enabled; - // Force pipeline rebind next draw - ctx.terrain_pipeline_bound = false; - } + state_control.setWireframe(ctx, enabled); } fn setTexturesEnabled(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.textures_enabled = enabled; - // Texture toggle is handled in shader via UBO uniform + state_control.setTexturesEnabled(ctx, enabled); } fn setDebugShadowView(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.debug_shadows_active = enabled; - // Debug shadow view is handled in shader via viewport_size.z uniform + state_control.setDebugShadowView(ctx, enabled); } fn setVSync(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (ctx.vsync_enabled == enabled) return; - - ctx.vsync_enabled = enabled; - - // Query available present modes - var mode_count: u32 = 0; - _ = c.vkGetPhysicalDeviceSurfacePresentModesKHR(ctx.vulkan_device.physical_device, ctx.vulkan_device.surface, &mode_count, null); - - if (mode_count == 0) return; - - var modes: [8]c.VkPresentModeKHR = undefined; - var actual_count: u32 = @min(mode_count, 8); - _ = c.vkGetPhysicalDeviceSurfacePresentModesKHR(ctx.vulkan_device.physical_device, ctx.vulkan_device.surface, &actual_count, &modes); - - // Select present mode based on vsync preference - if (enabled) { - // VSync ON: FIFO is always available - ctx.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; - } else { - // VSync OFF: Prefer IMMEDIATE, fallback to MAILBOX, then FIFO - ctx.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; // Default fallback - for (modes[0..actual_count]) |mode| { - if (mode == c.VK_PRESENT_MODE_IMMEDIATE_KHR) { - 
ctx.present_mode = c.VK_PRESENT_MODE_IMMEDIATE_KHR; - break; - } else if (mode == c.VK_PRESENT_MODE_MAILBOX_KHR) { - ctx.present_mode = c.VK_PRESENT_MODE_MAILBOX_KHR; - // Don't break, keep looking for IMMEDIATE - } - } - } - - // Trigger swapchain recreation on next frame - ctx.framebuffer_resized = true; - - const mode_name: []const u8 = switch (ctx.present_mode) { - c.VK_PRESENT_MODE_IMMEDIATE_KHR => "IMMEDIATE (VSync OFF)", - c.VK_PRESENT_MODE_MAILBOX_KHR => "MAILBOX (Triple Buffer)", - c.VK_PRESENT_MODE_FIFO_KHR => "FIFO (VSync ON)", - c.VK_PRESENT_MODE_FIFO_RELAXED_KHR => "FIFO_RELAXED", - else => "UNKNOWN", - }; - std.log.info("Vulkan present mode: {s}", .{mode_name}); + state_control.setVSync(ctx, enabled); } fn setAnisotropicFiltering(ctx_ptr: *anyopaque, level: u8) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (ctx.anisotropic_filtering == level) return; - ctx.anisotropic_filtering = level; - // Recreate sampler logic is complex as it requires recreating all texture samplers - // For now, we rely on application restart or next resource load for full effect, - // or implement dynamic sampler updates if critical. - // Given the architecture, recreating swapchain/resources often happens on setting change anyway. + state_control.setAnisotropicFiltering(ctx, level); } fn setVolumetricDensity(ctx_ptr: *anyopaque, density: f32) void { - // This is just a parameter update for the next frame's uniform update - // No immediate Vulkan action required other than ensuring the value is used. - // Since uniforms are updated every frame from App settings in main loop, - // this specific setter might just be a placeholder or hook for future optimization. 
- _ = ctx_ptr; - _ = density; + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + state_control.setVolumetricDensity(ctx, density); } fn setMSAA(ctx_ptr: *anyopaque, samples: u8) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - const clamped = @min(samples, ctx.vulkan_device.max_msaa_samples); - if (ctx.msaa_samples == clamped) return; - - ctx.msaa_samples = clamped; - ctx.swapchain.msaa_samples = clamped; - ctx.framebuffer_resized = true; // Triggers recreateSwapchain on next frame - ctx.pipeline_rebuild_needed = true; - std.log.info("Vulkan MSAA set to {}x (pending swapchain recreation)", .{clamped}); + state_control.setMSAA(ctx, samples); } fn getMaxAnisotropy(ctx_ptr: *anyopaque) u8 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromFloat(@min(ctx.vulkan_device.max_anisotropy, 16.0)); + return state_control.getMaxAnisotropy(ctx); } fn getMaxMSAASamples(ctx_ptr: *anyopaque) u8 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.vulkan_device.max_msaa_samples; + return state_control.getMaxMSAASamples(ctx); } fn getFaultCount(ctx_ptr: *anyopaque) u32 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.vulkan_device.fault_count; + return state_control.getFaultCount(ctx); } fn getValidationErrorCount(ctx_ptr: *anyopaque) u32 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.vulkan_device.validation_error_count.load(.monotonic); + return state_control.getValidationErrorCount(ctx); } fn drawIndexed(ctx_ptr: *anyopaque, vbo_handle: rhi.BufferHandle, ebo_handle: rhi.BufferHandle, count: u32) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.mutex.lock(); defer ctx.mutex.unlock(); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) beginMainPassInternal(ctx); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) 
return; - - const vbo_opt = ctx.resources.buffers.get(vbo_handle); - const ebo_opt = ctx.resources.buffers.get(ebo_handle); - - if (vbo_opt) |vbo| { - if (ebo_opt) |ebo| { - ctx.draw_call_count += 1; - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - // Use simple pipeline binding logic - if (!ctx.terrain_pipeline_bound) { - const selected_pipeline = if (ctx.wireframe_enabled and ctx.wireframe_pipeline != null) - ctx.wireframe_pipeline - else - ctx.pipeline; - if (selected_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); - ctx.terrain_pipeline_bound = true; - } - - const descriptor_set = if (ctx.lod_mode) - &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] - else - &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, descriptor_set, 0, null); - - const offset: c.VkDeviceSize = 0; - c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset); - c.vkCmdBindIndexBuffer(command_buffer, ebo.buffer, 0, c.VK_INDEX_TYPE_UINT16); - c.vkCmdDrawIndexed(command_buffer, count, 1, 0, 0, 0); - } - } + draw_submission.drawIndexed(ctx, vbo_handle, ebo_handle, count); } fn drawIndirect(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, command_buffer: rhi.BufferHandle, offset: usize, draw_count: u32, stride: u32) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.mutex.lock(); defer ctx.mutex.unlock(); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) beginMainPassInternal(ctx); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) return; - - const use_shadow = ctx.shadow_system.pass_active; - const use_g_pass = ctx.g_pass_active; - - const vbo_opt = ctx.resources.buffers.get(handle); - const cmd_opt = 
ctx.resources.buffers.get(command_buffer); - - if (vbo_opt) |vbo| { - if (cmd_opt) |cmd| { - ctx.draw_call_count += 1; - const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; - - if (use_shadow) { - if (!ctx.shadow_system.pipeline_bound) { - if (ctx.shadow_system.shadow_pipeline == null) return; - c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); - ctx.shadow_system.pipeline_bound = true; - } - } else if (use_g_pass) { - if (ctx.g_pipeline == null) return; - c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.g_pipeline); - } else { - if (!ctx.terrain_pipeline_bound) { - const selected_pipeline = if (ctx.wireframe_enabled and ctx.wireframe_pipeline != null) - ctx.wireframe_pipeline - else - ctx.pipeline; - if (selected_pipeline == null) { - std.log.warn("drawIndirect: main pipeline (selected_pipeline) is null - cannot draw terrain", .{}); - return; - } - c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); - ctx.terrain_pipeline_bound = true; - } - } - - const descriptor_set = if (!use_shadow and ctx.lod_mode) - &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] - else - &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - c.vkCmdBindDescriptorSets(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, descriptor_set, 0, null); - - if (use_shadow) { - const cascade_index = ctx.shadow_system.pass_index; - const texel_size = ctx.shadow_texel_sizes[cascade_index]; - const shadow_uniforms = ShadowModelUniforms{ - .mvp = ctx.shadow_system.pass_matrix, - .bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, - }; - c.vkCmdPushConstants(cb, ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); - } else { - const uniforms = ModelUniforms{ - .model = Mat4.identity, - .color = .{ 1.0, 1.0, 1.0 }, - .mask_radius = 0, - }; - c.vkCmdPushConstants(cb, 
ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); - } - - const offset_vals = [_]c.VkDeviceSize{0}; - c.vkCmdBindVertexBuffers(cb, 0, 1, &vbo.buffer, &offset_vals); - - if (cmd.is_host_visible and draw_count > 0 and stride > 0) { - const stride_bytes: usize = @intCast(stride); - const map_size: usize = @as(usize, @intCast(draw_count)) * stride_bytes; - const cmd_size: usize = @intCast(cmd.size); - if (offset <= cmd_size and map_size <= cmd_size - offset) { - if (cmd.mapped_ptr) |ptr| { - const base = @as([*]const u8, @ptrCast(ptr)) + offset; - var draw_index: u32 = 0; - while (draw_index < draw_count) : (draw_index += 1) { - const cmd_ptr = @as(*const rhi.DrawIndirectCommand, @ptrCast(@alignCast(base + @as(usize, draw_index) * stride_bytes))); - const draw_cmd = cmd_ptr.*; - if (draw_cmd.vertexCount == 0 or draw_cmd.instanceCount == 0) continue; - c.vkCmdDraw(cb, draw_cmd.vertexCount, draw_cmd.instanceCount, draw_cmd.firstVertex, draw_cmd.firstInstance); - } - return; - } - } else { - std.log.warn("drawIndirect: command buffer range out of bounds (offset={}, size={}, buffer={})", .{ offset, map_size, cmd_size }); - } - } - - if (ctx.vulkan_device.multi_draw_indirect) { - c.vkCmdDrawIndirect(cb, cmd.buffer, @intCast(offset), draw_count, stride); - } else { - const stride_bytes: usize = @intCast(stride); - var draw_index: u32 = 0; - while (draw_index < draw_count) : (draw_index += 1) { - const draw_offset = offset + @as(usize, draw_index) * stride_bytes; - c.vkCmdDrawIndirect(cb, cmd.buffer, @intCast(draw_offset), 1, stride); - } - std.log.info("drawIndirect: MDI unsupported - drew {} draws via single-draw fallback", .{draw_count}); - } - } - } + draw_submission.drawIndirect(ctx, handle, command_buffer, offset, draw_count, stride); } fn drawInstance(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, count: u32, instance_index: u32) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - 
if (!ctx.frames.frame_in_progress) return; - ctx.mutex.lock(); defer ctx.mutex.unlock(); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) beginMainPassInternal(ctx); - - const use_shadow = ctx.shadow_system.pass_active; - const use_g_pass = ctx.g_pass_active; - - const vbo_opt = ctx.resources.buffers.get(handle); - - if (vbo_opt) |vbo| { - ctx.draw_call_count += 1; - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - if (use_shadow) { - if (!ctx.shadow_system.pipeline_bound) { - if (ctx.shadow_system.shadow_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); - ctx.shadow_system.pipeline_bound = true; - } - } else if (use_g_pass) { - if (ctx.g_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.g_pipeline); - } else { - if (!ctx.terrain_pipeline_bound) { - const selected_pipeline = if (ctx.wireframe_enabled and ctx.wireframe_pipeline != null) - ctx.wireframe_pipeline - else - ctx.pipeline; - if (selected_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); - ctx.terrain_pipeline_bound = true; - } - } - - const descriptor_set = if (!use_shadow and ctx.lod_mode) - &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] - else - &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, descriptor_set, 0, null); - - if (use_shadow) { - const cascade_index = ctx.shadow_system.pass_index; - const texel_size = ctx.shadow_texel_sizes[cascade_index]; - const shadow_uniforms = ShadowModelUniforms{ - .mvp = ctx.shadow_system.pass_matrix.multiply(ctx.current_model), - .bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, - }; - c.vkCmdPushConstants(command_buffer, 
ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); - } else { - const uniforms = ModelUniforms{ - .model = Mat4.identity, - .color = .{ 1.0, 1.0, 1.0 }, - .mask_radius = 0, - }; - c.vkCmdPushConstants(command_buffer, ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); - } - - const offset: c.VkDeviceSize = 0; - c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset); - c.vkCmdDraw(command_buffer, count, 1, 0, instance_index); - } + draw_submission.drawInstance(ctx, handle, count, instance_index); } fn draw(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, count: u32, mode: rhi.DrawMode) void { @@ -4264,258 +477,39 @@ fn draw(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, count: u32, mode: rhi.Dra fn drawOffset(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, count: u32, mode: rhi.DrawMode, offset: usize) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.mutex.lock(); defer ctx.mutex.unlock(); - - // Special case: post-process pass draws fullscreen triangle without VBO - if (ctx.post_process_pass_active) { - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - // Pipeline and descriptor sets are already bound in beginPostProcessPassInternal - c.vkCmdDraw(command_buffer, count, 1, 0, 0); - ctx.draw_call_count += 1; - return; - } - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) beginMainPassInternal(ctx); - - if (!ctx.main_pass_active and !ctx.shadow_system.pass_active and !ctx.g_pass_active) return; - - const use_shadow = ctx.shadow_system.pass_active; - const use_g_pass = ctx.g_pass_active; - - const vbo_opt = ctx.resources.buffers.get(handle); - - if (vbo_opt) |vbo| { - const vertex_stride: u64 = @sizeOf(rhi.Vertex); - const required_bytes: u64 = @as(u64, offset) + @as(u64, 
count) * vertex_stride; - if (required_bytes > vbo.size) { - std.log.err("drawOffset: vertex buffer overrun (handle={}, offset={}, count={}, size={})", .{ handle, offset, count, vbo.size }); - return; - } - - ctx.draw_call_count += 1; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - // Bind pipeline only if not already bound - if (use_shadow) { - if (!ctx.shadow_system.pipeline_bound) { - if (ctx.shadow_system.shadow_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); - ctx.shadow_system.pipeline_bound = true; - } - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, &ctx.descriptors.descriptor_sets[ctx.frames.current_frame], 0, null); - } else if (use_g_pass) { - if (ctx.g_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.g_pipeline); - - const descriptor_set = if (ctx.lod_mode) - &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] - else - &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, descriptor_set, 0, null); - } else { - const needs_rebinding = !ctx.terrain_pipeline_bound or ctx.selection_mode or mode == .lines; - if (needs_rebinding) { - const selected_pipeline = if (ctx.selection_mode and ctx.selection_pipeline != null) - ctx.selection_pipeline - else if (mode == .lines and ctx.line_pipeline != null) - ctx.line_pipeline - else if (ctx.wireframe_enabled and ctx.wireframe_pipeline != null) - ctx.wireframe_pipeline - else - ctx.pipeline; - if (selected_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); - // Mark bound only if it's the main terrain pipeline - ctx.terrain_pipeline_bound = (selected_pipeline == ctx.pipeline); - } - - const 
descriptor_set = if (ctx.lod_mode) - &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] - else - &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_layout, 0, 1, descriptor_set, 0, null); - } - - if (use_shadow) { - const cascade_index = ctx.shadow_system.pass_index; - const texel_size = ctx.shadow_texel_sizes[cascade_index]; - const shadow_uniforms = ShadowModelUniforms{ - .mvp = ctx.shadow_system.pass_matrix.multiply(ctx.current_model), - .bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, - }; - c.vkCmdPushConstants(command_buffer, ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); - } else { - const uniforms = ModelUniforms{ - .model = ctx.current_model, - .color = ctx.current_color, - .mask_radius = ctx.current_mask_radius, - }; - c.vkCmdPushConstants(command_buffer, ctx.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); - } - - const offset_vbo: c.VkDeviceSize = @intCast(offset); - c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset_vbo); - c.vkCmdDraw(command_buffer, count, 1, 0, 0); - } -} - -fn flushUI(ctx: *VulkanContext) void { - if (!ctx.main_pass_active and !ctx.fxaa.pass_active) { - return; - } - if (ctx.ui_vertex_offset / (6 * @sizeOf(f32)) > ctx.ui_flushed_vertex_count) { - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - const total_vertices: u32 = @intCast(ctx.ui_vertex_offset / (6 * @sizeOf(f32))); - const count = total_vertices - ctx.ui_flushed_vertex_count; - - c.vkCmdDraw(command_buffer, count, 1, ctx.ui_flushed_vertex_count, 0); - ctx.ui_flushed_vertex_count = total_vertices; - } + draw_submission.drawOffset(ctx, handle, count, mode, offset); } fn bindBuffer(ctx_ptr: *anyopaque, handle: rhi.BufferHandle, usage: 
rhi.BufferUsage) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - ctx.mutex.lock(); defer ctx.mutex.unlock(); - const buf_opt = ctx.resources.buffers.get(handle); - - if (buf_opt) |buf| { - const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; - const offset: c.VkDeviceSize = 0; - switch (usage) { - .vertex => c.vkCmdBindVertexBuffers(cb, 0, 1, &buf.buffer, &offset), - .index => c.vkCmdBindIndexBuffer(cb, buf.buffer, 0, c.VK_INDEX_TYPE_UINT16), - else => {}, - } - } + draw_submission.bindBuffer(ctx, handle, usage); } fn pushConstants(ctx_ptr: *anyopaque, stages: rhi.ShaderStageFlags, offset: u32, size: u32, data: *const anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - - var vk_stages: c.VkShaderStageFlags = 0; - if (stages.vertex) vk_stages |= c.VK_SHADER_STAGE_VERTEX_BIT; - if (stages.fragment) vk_stages |= c.VK_SHADER_STAGE_FRAGMENT_BIT; - if (stages.compute) vk_stages |= c.VK_SHADER_STAGE_COMPUTE_BIT; - - const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; - // Currently we only have one main pipeline layout used for everything. - // In a more SOLID system, we'd bind the layout associated with the current shader. - c.vkCmdPushConstants(cb, ctx.pipeline_layout, vk_stages, offset, size, data); + draw_submission.pushConstants(ctx, stages, offset, size, data); } // 2D Rendering functions fn begin2DPass(ctx_ptr: *anyopaque, screen_width: f32, screen_height: f32) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) { - return; - } - ctx.mutex.lock(); defer ctx.mutex.unlock(); - - const use_swapchain = ctx.post_process_ran_this_frame; - const ui_pipeline = if (use_swapchain) ctx.ui_swapchain_pipeline else ctx.ui_pipeline; - if (ui_pipeline == null) return; - - // If post-process already ran, render UI directly to swapchain (overlay). 
- // Otherwise, use the main HDR pass so post-process can include UI. - if (use_swapchain) { - if (!ctx.fxaa.pass_active) { - beginFXAAPassForUI(ctx); - } - if (!ctx.fxaa.pass_active) return; - } else { - if (!ctx.main_pass_active) beginMainPassInternal(ctx); - if (!ctx.main_pass_active) return; - } - - ctx.ui_using_swapchain = use_swapchain; - - ctx.ui_screen_width = screen_width; - ctx.ui_screen_height = screen_height; - ctx.ui_in_progress = true; - - // Use persistently mapped memory if available - const ui_vbo = ctx.ui_vbos[ctx.frames.current_frame]; - if (ui_vbo.mapped_ptr) |ptr| { - ctx.ui_mapped_ptr = ptr; - } else { - std.log.err("UI VBO memory not mapped!", .{}); - } - - // Bind UI pipeline and VBO - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ui_pipeline); - ctx.terrain_pipeline_bound = false; - - const offset_val: c.VkDeviceSize = 0; - c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &ui_vbo.buffer, &offset_val); - - // Set orthographic projection - const proj = Mat4.orthographic(0, ctx.ui_screen_width, ctx.ui_screen_height, 0, -1, 1); - c.vkCmdPushConstants(command_buffer, ctx.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); - - // Force Viewport/Scissor to match UI screen size - const viewport = c.VkViewport{ .x = 0, .y = 0, .width = ctx.ui_screen_width, .height = ctx.ui_screen_height, .minDepth = 0, .maxDepth = 1 }; - c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); - const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = @intFromFloat(ctx.ui_screen_width), .height = @intFromFloat(ctx.ui_screen_height) } }; - c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + ui_submission.begin2DPass(ctx, screen_width, screen_height); } fn end2DPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.ui_in_progress) return; - - ctx.ui_mapped_ptr = null; - - 
flushUI(ctx); - if (ctx.ui_using_swapchain) { - endFXAAPassInternal(ctx); - ctx.ui_using_swapchain = false; - } - ctx.ui_in_progress = false; + ui_submission.end2DPass(ctx); } fn drawRect2D(ctx_ptr: *anyopaque, rect: rhi.Rect, color: rhi.Color) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - - const x = rect.x; - const y = rect.y; - const w = rect.width; - const h = rect.height; - - // Two triangles forming a quad - 6 vertices - const vertices = [_]f32{ - x, y, color.r, color.g, color.b, color.a, - x + w, y, color.r, color.g, color.b, color.a, - x + w, y + h, color.r, color.g, color.b, color.a, - x, y, color.r, color.g, color.b, color.a, - x + w, y + h, color.r, color.g, color.b, color.a, - x, y + h, color.r, color.g, color.b, color.a, - }; - - const size = @sizeOf(@TypeOf(vertices)); - - // Check overflow - const ui_vbo = ctx.ui_vbos[ctx.frames.current_frame]; - if (ctx.ui_vertex_offset + size > ui_vbo.size) { - return; - } - - if (ctx.ui_mapped_ptr) |ptr| { - const dest = @as([*]u8, @ptrCast(ptr)) + ctx.ui_vertex_offset; - @memcpy(dest[0..size], std.mem.asBytes(&vertices)); - ctx.ui_vertex_offset += size; - } + ui_submission.drawRect2D(ctx, rect, color); } const VULKAN_SHADOW_CONTEXT_VTABLE = rhi.IShadowContext.VTable{ @@ -4525,114 +519,14 @@ const VULKAN_SHADOW_CONTEXT_VTABLE = rhi.IShadowContext.VTable{ .getShadowMapHandle = getShadowMapHandle, }; -fn getUIPipeline(ctx: *VulkanContext, textured: bool) c.VkPipeline { - if (ctx.ui_using_swapchain) { - return if (textured) ctx.ui_swapchain_tex_pipeline else ctx.ui_swapchain_pipeline; - } - return if (textured) ctx.ui_tex_pipeline else ctx.ui_pipeline; -} - fn bindUIPipeline(ctx_ptr: *anyopaque, textured: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress) return; - - // Reset this so other pipelines know to rebind if they are called next - ctx.terrain_pipeline_bound = false; - - const command_buffer = 
ctx.frames.command_buffers[ctx.frames.current_frame]; - - const pipeline = getUIPipeline(ctx, textured); - if (pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); + ui_submission.bindUIPipeline(ctx, textured); } fn drawTexture2D(ctx_ptr: *anyopaque, texture: rhi.TextureHandle, rect: rhi.Rect) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.frames.frame_in_progress or !ctx.ui_in_progress) return; - - // 1. Flush normal UI if any - flushUI(ctx); - - const tex_opt = ctx.resources.textures.get(texture); - if (tex_opt == null) { - std.log.err("drawTexture2D: Texture handle {} not found in textures map!", .{texture}); - return; - } - const tex = tex_opt.?; - - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - - // 2. Bind Textured UI Pipeline - const textured_pipeline = getUIPipeline(ctx, true); - if (textured_pipeline == null) return; - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, textured_pipeline); - ctx.terrain_pipeline_bound = false; - - // 3. 
Update & Bind Descriptor Set - var image_info = std.mem.zeroes(c.VkDescriptorImageInfo); - image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - image_info.imageView = tex.view; - image_info.sampler = tex.sampler; - - const frame = ctx.frames.current_frame; - const idx = ctx.ui_tex_descriptor_next[frame]; - const pool_len = ctx.ui_tex_descriptor_pool[frame].len; - ctx.ui_tex_descriptor_next[frame] = @intCast((idx + 1) % pool_len); - const ds = ctx.ui_tex_descriptor_pool[frame][idx]; - - var write = std.mem.zeroes(c.VkWriteDescriptorSet); - write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - write.dstSet = ds; - write.dstBinding = 0; - write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - write.descriptorCount = 1; - write.pImageInfo = &image_info; - - c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write, 0, null); - c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.ui_tex_pipeline_layout, 0, 1, &ds, 0, null); - - // 4. Set Push Constants (Projection) - const proj = Mat4.orthographic(0, ctx.ui_screen_width, ctx.ui_screen_height, 0, -1, 1); - c.vkCmdPushConstants(command_buffer, ctx.ui_tex_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); - - // 5. 
Draw - const x = rect.x; - const y = rect.y; - const w = rect.width; - const h = rect.height; - - // Use 6 floats per vertex (stride 24) to match untextured UI layout - // position (2), texcoord (2), padding (2) - const vertices = [_]f32{ - x, y, 0.0, 0.0, 0.0, 0.0, - x + w, y, 1.0, 0.0, 0.0, 0.0, - x + w, y + h, 1.0, 1.0, 0.0, 0.0, - x, y, 0.0, 0.0, 0.0, 0.0, - x + w, y + h, 1.0, 1.0, 0.0, 0.0, - x, y + h, 0.0, 1.0, 0.0, 0.0, - }; - - const size = @sizeOf(@TypeOf(vertices)); - if (ctx.ui_mapped_ptr) |ptr| { - const ui_vbo = ctx.ui_vbos[ctx.frames.current_frame]; - if (ctx.ui_vertex_offset + size <= ui_vbo.size) { - const dest = @as([*]u8, @ptrCast(ptr)) + ctx.ui_vertex_offset; - @memcpy(dest[0..size], std.mem.asBytes(&vertices)); - - const start_vertex = @as(u32, @intCast(ctx.ui_vertex_offset / (6 * @sizeOf(f32)))); - c.vkCmdDraw(command_buffer, 6, 1, start_vertex, 0); - - ctx.ui_vertex_offset += size; - ctx.ui_flushed_vertex_count = @intCast(ctx.ui_vertex_offset / (6 * @sizeOf(f32))); - } - } - - // 6. 
Restore normal UI state for subsequent calls - const restore_pipeline = getUIPipeline(ctx, false); - if (restore_pipeline != null) { - c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, restore_pipeline); - c.vkCmdPushConstants(command_buffer, ctx.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); - } + ui_submission.drawTexture2D(ctx, texture, rect); } fn createShader(ctx_ptr: *anyopaque, vertex_src: [*c]const u8, fragment_src: [*c]const u8) rhi.RhiError!rhi.ShaderHandle { @@ -4662,132 +556,61 @@ fn bindShader(ctx_ptr: *anyopaque, handle: rhi.ShaderHandle) void { _ = handle; } -fn shaderSetMat4(ctx_ptr: *anyopaque, handle: rhi.ShaderHandle, name: [*c]const u8, matrix: *const [4][4]f32) void { - _ = ctx_ptr; - _ = handle; - _ = name; - _ = matrix; -} - -fn shaderSetVec3(ctx_ptr: *anyopaque, handle: rhi.ShaderHandle, name: [*c]const u8, x: f32, y: f32, z: f32) void { - _ = ctx_ptr; - _ = handle; - _ = name; - _ = x; - _ = y; - _ = z; -} - -fn shaderSetFloat(ctx_ptr: *anyopaque, handle: rhi.ShaderHandle, name: [*c]const u8, value: f32) void { - _ = ctx_ptr; - _ = handle; - _ = name; - _ = value; -} - -fn shaderSetInt(ctx_ptr: *anyopaque, handle: rhi.ShaderHandle, name: [*c]const u8, value: i32) void { - _ = ctx_ptr; - _ = handle; - _ = name; - _ = value; -} - -fn ensureNoRenderPassActiveInternal(ctx: *VulkanContext) void { - if (ctx.main_pass_active) endMainPassInternal(ctx); - if (ctx.shadow_system.pass_active) endShadowPassInternal(ctx); - if (ctx.g_pass_active) endGPassInternal(ctx); - if (ctx.post_process_pass_active) endPostProcessPassInternal(ctx); -} - -fn ensureNoRenderPassActive(ctx_ptr: *anyopaque) void { - const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.mutex.lock(); - defer ctx.mutex.unlock(); - ensureNoRenderPassActiveInternal(ctx); -} - -fn beginShadowPassInternal(ctx: *VulkanContext, cascade_index: u32, light_space_matrix: Mat4) void { - if (!ctx.frames.frame_in_progress) return; - 
const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - ctx.shadow_system.beginPass(command_buffer, cascade_index, light_space_matrix); -} - fn beginShadowPass(ctx_ptr: *anyopaque, cascade_index: u32, light_space_matrix: Mat4) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - beginShadowPassInternal(ctx, cascade_index, light_space_matrix); -} - -fn endShadowPassInternal(ctx: *VulkanContext) void { - const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; - ctx.shadow_system.endPass(command_buffer); + shadow_bridge.beginShadowPassInternal(ctx, cascade_index, light_space_matrix); } fn endShadowPass(ctx_ptr: *anyopaque) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); ctx.mutex.lock(); defer ctx.mutex.unlock(); - endShadowPassInternal(ctx); + shadow_bridge.endShadowPassInternal(ctx); } fn getShadowMapHandle(ctx_ptr: *anyopaque, cascade_index: u32) rhi.TextureHandle { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (cascade_index >= rhi.SHADOW_CASCADE_COUNT) return 0; - return ctx.shadow_map_handles[cascade_index]; + return shadow_bridge.getShadowMapHandle(ctx, cascade_index); } fn updateShadowUniforms(ctx_ptr: *anyopaque, params: rhi.ShadowParams) anyerror!void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - - var splits = [_]f32{ 0, 0, 0, 0 }; - var sizes = [_]f32{ 0, 0, 0, 0 }; - @memcpy(splits[0..rhi.SHADOW_CASCADE_COUNT], ¶ms.cascade_splits); - @memcpy(sizes[0..rhi.SHADOW_CASCADE_COUNT], ¶ms.shadow_texel_sizes); - - @memcpy(&ctx.shadow_texel_sizes, ¶ms.shadow_texel_sizes); - - const shadow_uniforms = ShadowUniforms{ - .light_space_matrices = params.light_space_matrices, - .cascade_splits = splits, - .shadow_texel_sizes = sizes, - }; - - try ctx.descriptors.updateShadowUniforms(ctx.frames.current_frame, &shadow_uniforms); + try shadow_bridge.updateShadowUniforms(ctx, params); } fn getNativeSkyPipeline(ctx_ptr: 
*anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.sky_pipeline); + return native_access.getNativeSkyPipeline(ctx); } fn getNativeSkyPipelineLayout(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.sky_pipeline_layout); + return native_access.getNativeSkyPipelineLayout(ctx); } fn getNativeCloudPipeline(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.cloud_pipeline); + return native_access.getNativeCloudPipeline(ctx); } fn getNativeCloudPipelineLayout(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.cloud_pipeline_layout); + return native_access.getNativeCloudPipelineLayout(ctx); } fn getNativeMainDescriptorSet(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.descriptors.descriptor_sets[ctx.frames.current_frame]); + return native_access.getNativeMainDescriptorSet(ctx); } fn getNativeCommandBuffer(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.frames.command_buffers[ctx.frames.current_frame]); + return native_access.getNativeCommandBuffer(ctx); } fn getNativeSwapchainExtent(ctx_ptr: *anyopaque) [2]u32 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - const extent = ctx.swapchain.getExtent(); - return .{ extent.width, extent.height }; + return native_access.getNativeSwapchainExtent(ctx); } fn getNativeDevice(ctx_ptr: *anyopaque) u64 { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return @intFromPtr(ctx.vulkan_device.vk_device); + return native_access.getNativeDevice(ctx); } fn computeSSAO(ctx_ptr: *anyopaque, proj: Mat4, inv_proj: Mat4) void { @@ -4803,9 +626,8 @@ fn computeSSAO(ctx_ptr: *anyopaque, proj: Mat4, inv_proj: Mat4) void { } fn drawDebugShadowMap(ctx_ptr: *anyopaque, 
cascade_index: usize, depth_map_handle: rhi.TextureHandle) void { - _ = ctx_ptr; - _ = cascade_index; - _ = depth_map_handle; + const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); + shadow_bridge.drawDebugShadowMap(ctx, cascade_index, depth_map_handle); } const VULKAN_SSAO_VTABLE = rhi.ISSAOContext.VTable{ @@ -4860,6 +682,7 @@ const VULKAN_RHI_VTABLE = rhi.RHI.VTable{ .updateBuffer = updateBuffer, .destroyBuffer = destroyBuffer, .createTexture = createTexture, + .createTexture3D = createTexture3D, .destroyTexture = destroyTexture, .updateTexture = updateTexture, .createShader = createShader, @@ -4924,305 +747,52 @@ const VULKAN_RHI_VTABLE = rhi.RHI.VTable{ .setFXAA = setFXAA, .setBloom = setBloom, .setBloomIntensity = setBloomIntensity, + .setVignetteEnabled = setVignetteEnabled, + .setVignetteIntensity = setVignetteIntensity, + .setFilmGrainEnabled = setFilmGrainEnabled, + .setFilmGrainIntensity = setFilmGrainIntensity, + .setColorGradingEnabled = setColorGradingEnabled, + .setColorGradingIntensity = setColorGradingIntensity, }; -fn mapPassName(name: []const u8) ?GpuPass { - if (std.mem.eql(u8, name, "ShadowPass0")) return .shadow_0; - if (std.mem.eql(u8, name, "ShadowPass1")) return .shadow_1; - if (std.mem.eql(u8, name, "ShadowPass2")) return .shadow_2; - if (std.mem.eql(u8, name, "GPass")) return .g_pass; - if (std.mem.eql(u8, name, "SSAOPass")) return .ssao; - if (std.mem.eql(u8, name, "SkyPass")) return .sky; - if (std.mem.eql(u8, name, "OpaquePass")) return .opaque_pass; - if (std.mem.eql(u8, name, "CloudPass")) return .cloud; - if (std.mem.eql(u8, name, "BloomPass")) return .bloom; - if (std.mem.eql(u8, name, "FXAAPass")) return .fxaa; - if (std.mem.eql(u8, name, "PostProcessPass")) return .post_process; - return null; -} - fn beginPassTiming(ctx_ptr: *anyopaque, pass_name: []const u8) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.timing_enabled or ctx.query_pool == null) return; - - const pass = mapPassName(pass_name) 
orelse return; - const cmd = ctx.frames.command_buffers[ctx.frames.current_frame]; - if (cmd == null) return; - - const query_index = @as(u32, @intCast(ctx.frames.current_frame * QUERY_COUNT_PER_FRAME)) + @as(u32, @intFromEnum(pass)) * 2; - c.vkCmdWriteTimestamp(cmd, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, ctx.query_pool, query_index); + timing.beginPassTiming(ctx, pass_name); } fn endPassTiming(ctx_ptr: *anyopaque, pass_name: []const u8) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - if (!ctx.timing_enabled or ctx.query_pool == null) return; - - const pass = mapPassName(pass_name) orelse return; - const cmd = ctx.frames.command_buffers[ctx.frames.current_frame]; - if (cmd == null) return; - - const query_index = @as(u32, @intCast(ctx.frames.current_frame * QUERY_COUNT_PER_FRAME)) + @as(u32, @intFromEnum(pass)) * 2 + 1; - c.vkCmdWriteTimestamp(cmd, c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx.query_pool, query_index); + timing.endPassTiming(ctx, pass_name); } fn getTimingResults(ctx_ptr: *anyopaque) rhi.GpuTimingResults { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.timing_results; + return ctx.timing.timing_results; } fn isTimingEnabled(ctx_ptr: *anyopaque) bool { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - return ctx.timing_enabled; + return ctx.timing.timing_enabled; } fn setTimingEnabled(ctx_ptr: *anyopaque, enabled: bool) void { const ctx: *VulkanContext = @ptrCast(@alignCast(ctx_ptr)); - ctx.timing_enabled = enabled; + ctx.timing.timing_enabled = enabled; } fn processTimingResults(ctx: *VulkanContext) void { - if (!ctx.timing_enabled or ctx.query_pool == null) return; - if (!ctx.timing_enabled or ctx.query_pool == null) return; - if (ctx.frame_index < MAX_FRAMES_IN_FLIGHT) return; - - const frame = ctx.frames.current_frame; - const offset = frame * QUERY_COUNT_PER_FRAME; - var results: [QUERY_COUNT_PER_FRAME]u64 = .{0} ** QUERY_COUNT_PER_FRAME; - - const res = c.vkGetQueryPoolResults( - 
ctx.vulkan_device.vk_device, - ctx.query_pool, - @intCast(offset), - QUERY_COUNT_PER_FRAME, - @sizeOf(@TypeOf(results)), - &results, - @sizeOf(u64), - c.VK_QUERY_RESULT_64_BIT, - ); - - if (res == c.VK_SUCCESS) { - const period = ctx.vulkan_device.timestamp_period; - - ctx.timing_results.shadow_pass_ms[0] = @as(f32, @floatFromInt(results[1] -% results[0])) * period / 1e6; - ctx.timing_results.shadow_pass_ms[1] = @as(f32, @floatFromInt(results[3] -% results[2])) * period / 1e6; - ctx.timing_results.shadow_pass_ms[2] = @as(f32, @floatFromInt(results[5] -% results[4])) * period / 1e6; - ctx.timing_results.g_pass_ms = @as(f32, @floatFromInt(results[7] -% results[6])) * period / 1e6; - ctx.timing_results.ssao_pass_ms = @as(f32, @floatFromInt(results[9] -% results[8])) * period / 1e6; - ctx.timing_results.sky_pass_ms = @as(f32, @floatFromInt(results[11] -% results[10])) * period / 1e6; - ctx.timing_results.opaque_pass_ms = @as(f32, @floatFromInt(results[13] -% results[12])) * period / 1e6; - ctx.timing_results.cloud_pass_ms = @as(f32, @floatFromInt(results[15] -% results[14])) * period / 1e6; - ctx.timing_results.bloom_pass_ms = @as(f32, @floatFromInt(results[17] -% results[16])) * period / 1e6; - ctx.timing_results.fxaa_pass_ms = @as(f32, @floatFromInt(results[19] -% results[18])) * period / 1e6; - ctx.timing_results.post_process_pass_ms = @as(f32, @floatFromInt(results[21] -% results[20])) * period / 1e6; - - ctx.timing_results.main_pass_ms = ctx.timing_results.sky_pass_ms + ctx.timing_results.opaque_pass_ms + ctx.timing_results.cloud_pass_ms; - - ctx.timing_results.validate(); - - ctx.timing_results.total_gpu_ms = 0; - ctx.timing_results.total_gpu_ms += ctx.timing_results.shadow_pass_ms[0]; - ctx.timing_results.total_gpu_ms += ctx.timing_results.shadow_pass_ms[1]; - ctx.timing_results.total_gpu_ms += ctx.timing_results.shadow_pass_ms[2]; - ctx.timing_results.total_gpu_ms += ctx.timing_results.g_pass_ms; - ctx.timing_results.total_gpu_ms += 
ctx.timing_results.ssao_pass_ms; - ctx.timing_results.total_gpu_ms += ctx.timing_results.main_pass_ms; - ctx.timing_results.total_gpu_ms += ctx.timing_results.bloom_pass_ms; - ctx.timing_results.total_gpu_ms += ctx.timing_results.fxaa_pass_ms; - ctx.timing_results.total_gpu_ms += ctx.timing_results.post_process_pass_ms; - - if (ctx.timing_enabled) { - std.debug.print("GPU Frame Time: {d:.2}ms (Shadow: {d:.2}, G-Pass: {d:.2}, SSAO: {d:.2}, Main: {d:.2}, Bloom: {d:.2}, FXAA: {d:.2}, Post: {d:.2})\n", .{ - ctx.timing_results.total_gpu_ms, - ctx.timing_results.shadow_pass_ms[0] + ctx.timing_results.shadow_pass_ms[1] + ctx.timing_results.shadow_pass_ms[2], - ctx.timing_results.g_pass_ms, - ctx.timing_results.ssao_pass_ms, - ctx.timing_results.main_pass_ms, - ctx.timing_results.bloom_pass_ms, - ctx.timing_results.fxaa_pass_ms, - ctx.timing_results.post_process_pass_ms, - }); - } - } + timing.processTimingResults(ctx); } pub fn createRHI(allocator: std.mem.Allocator, window: *c.SDL_Window, render_device: ?*RenderDevice, shadow_resolution: u32, msaa_samples: u8, anisotropic_filtering: u8) !rhi.RHI { - const ctx = try allocator.create(VulkanContext); - @memset(std.mem.asBytes(ctx), 0); - - // Initialize all fields to safe defaults - ctx.allocator = allocator; - ctx.render_device = render_device; - ctx.shadow_resolution = shadow_resolution; - ctx.window = window; - ctx.shadow_system = try ShadowSystem.init(allocator, shadow_resolution); - ctx.vulkan_device = .{ - .allocator = allocator, - }; - ctx.swapchain.swapchain = .{ - .device = &ctx.vulkan_device, - .window = window, - .allocator = allocator, - }; - ctx.framebuffer_resized = false; - - ctx.draw_call_count = 0; - ctx.resources.buffers = std.AutoHashMap(rhi.BufferHandle, VulkanBuffer).init(allocator); - ctx.resources.next_buffer_handle = 1; - ctx.resources.textures = std.AutoHashMap(rhi.TextureHandle, TextureResource).init(allocator); - ctx.resources.next_texture_handle = 1; - ctx.current_texture = 0; - 
ctx.current_normal_texture = 0; - ctx.current_roughness_texture = 0; - ctx.current_displacement_texture = 0; - ctx.current_env_texture = 0; - ctx.dummy_texture = 0; - ctx.dummy_normal_texture = 0; - ctx.dummy_roughness_texture = 0; - ctx.mutex = .{}; - ctx.swapchain.swapchain.images = .empty; - ctx.swapchain.swapchain.image_views = .empty; - ctx.swapchain.swapchain.framebuffers = .empty; - ctx.clear_color = .{ 0.07, 0.08, 0.1, 1.0 }; - ctx.frames.frame_in_progress = false; - ctx.main_pass_active = false; - ctx.shadow_system.pass_active = false; - ctx.shadow_system.pass_index = 0; - ctx.ui_in_progress = false; - ctx.ui_mapped_ptr = null; - ctx.ui_vertex_offset = 0; - ctx.frame_index = 0; - ctx.timing_enabled = false; // Will be enabled via RHI call - ctx.timing_results = std.mem.zeroes(rhi.GpuTimingResults); - ctx.frames.current_frame = 0; - ctx.frames.current_image_index = 0; - - // Optimization state tracking - ctx.terrain_pipeline_bound = false; - ctx.shadow_system.pipeline_bound = false; - ctx.descriptors_updated = false; - ctx.bound_texture = 0; - ctx.bound_normal_texture = 0; - ctx.bound_roughness_texture = 0; - ctx.bound_displacement_texture = 0; - ctx.bound_env_texture = 0; - ctx.current_mask_radius = 0; - ctx.lod_mode = false; - ctx.pending_instance_buffer = 0; - ctx.pending_lod_instance_buffer = 0; - - // Rendering options - ctx.wireframe_enabled = false; - ctx.textures_enabled = true; - ctx.vsync_enabled = true; - ctx.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; - - const safe_mode_env = std.posix.getenv("ZIGCRAFT_SAFE_MODE"); - ctx.safe_mode = if (safe_mode_env) |val| - !(std.mem.eql(u8, val, "0") or std.mem.eql(u8, val, "false")) - else - false; - if (ctx.safe_mode) { - std.log.warn("ZIGCRAFT_SAFE_MODE enabled: throttling uploads and forcing GPU idle each frame", .{}); - } - - ctx.frames.command_pool = null; - ctx.resources.transfer_command_pool = null; - ctx.resources.transfer_ready = false; - ctx.swapchain.swapchain.main_render_pass = null; - 
ctx.swapchain.swapchain.handle = null; - ctx.swapchain.swapchain.depth_image = null; - ctx.swapchain.swapchain.depth_image_view = null; - ctx.swapchain.swapchain.depth_image_memory = null; - ctx.swapchain.swapchain.msaa_color_image = null; - ctx.swapchain.swapchain.msaa_color_view = null; - ctx.swapchain.swapchain.msaa_color_memory = null; - ctx.pipeline = null; - ctx.pipeline_layout = null; - ctx.wireframe_pipeline = null; - ctx.sky_pipeline = null; - ctx.sky_pipeline_layout = null; - ctx.ui_pipeline = null; - ctx.ui_pipeline_layout = null; - ctx.ui_tex_pipeline = null; - ctx.ui_tex_pipeline_layout = null; - ctx.ui_tex_descriptor_set_layout = null; - ctx.ui_swapchain_pipeline = null; - ctx.ui_swapchain_tex_pipeline = null; - ctx.ui_swapchain_render_pass = null; - ctx.ui_swapchain_framebuffers = .empty; - if (comptime build_options.debug_shadows) { - ctx.debug_shadow.pipeline = null; - ctx.debug_shadow.pipeline_layout = null; - ctx.debug_shadow.descriptor_set_layout = null; - ctx.debug_shadow.vbo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.debug_shadow.descriptor_next = .{ 0, 0 }; - } - ctx.cloud_pipeline = null; - ctx.cloud_pipeline_layout = null; - ctx.cloud_vbo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.cloud_ebo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.cloud_mesh_size = 10000.0; - ctx.descriptors.descriptor_pool = null; - ctx.descriptors.descriptor_set_layout = null; - ctx.memory_type_index = 0; - ctx.anisotropic_filtering = anisotropic_filtering; - ctx.msaa_samples = msaa_samples; - - ctx.shadow_system.shadow_image = null; - ctx.shadow_system.shadow_image_view = null; - ctx.shadow_system.shadow_image_memory = null; - ctx.shadow_system.shadow_sampler = null; - ctx.shadow_system.shadow_render_pass = null; - ctx.shadow_system.shadow_pipeline = null; - for (0..rhi.SHADOW_CASCADE_COUNT) |i| { - ctx.shadow_system.shadow_image_views[i] = null; - 
ctx.shadow_system.shadow_framebuffers[i] = null; - ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_UNDEFINED; - } - - for (0..MAX_FRAMES_IN_FLIGHT) |i| { - ctx.frames.image_available_semaphores[i] = null; - ctx.frames.render_finished_semaphores[i] = null; - ctx.frames.in_flight_fences[i] = null; - ctx.descriptors.global_ubos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.descriptors.shadow_ubos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.descriptors.shadow_ubos_mapped[i] = null; - ctx.ui_vbos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.descriptors.descriptor_sets[i] = null; - ctx.descriptors.lod_descriptor_sets[i] = null; - ctx.ui_tex_descriptor_sets[i] = null; - ctx.ui_tex_descriptor_next[i] = 0; - ctx.bound_instance_buffer[i] = 0; - ctx.bound_lod_instance_buffer[i] = 0; - for (0..ctx.ui_tex_descriptor_pool[i].len) |j| { - ctx.ui_tex_descriptor_pool[i][j] = null; - } - if (comptime build_options.debug_shadows) { - ctx.debug_shadow.descriptor_sets[i] = null; - ctx.debug_shadow.descriptor_next[i] = 0; - for (0..ctx.debug_shadow.descriptor_pool[i].len) |j| { - ctx.debug_shadow.descriptor_pool[i][j] = null; - } - } - ctx.resources.buffer_deletion_queue[i] = .empty; - ctx.resources.image_deletion_queue[i] = .empty; - } - ctx.model_ubo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.dummy_instance_buffer = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; - ctx.ui_screen_width = 0; - ctx.ui_screen_height = 0; - ctx.ui_flushed_vertex_count = 0; - ctx.cloud_vao = null; - ctx.dummy_shadow_image = null; - ctx.dummy_shadow_memory = null; - ctx.dummy_shadow_view = null; - ctx.current_model = Mat4.identity; - ctx.current_color = .{ 1.0, 1.0, 1.0 }; - ctx.current_mask_radius = 0; - - return rhi.RHI{ - .ptr = ctx, - .vtable = &VULKAN_RHI_VTABLE, - .device = render_device, - }; + return 
context_factory.createRHI( + VulkanContext, + allocator, + window, + render_device, + shadow_resolution, + msaa_samples, + anisotropic_filtering, + &VULKAN_RHI_VTABLE, + ); } diff --git a/src/engine/graphics/vulkan/bloom_system.zig b/src/engine/graphics/vulkan/bloom_system.zig index 578bf1f9..aa5b2fe7 100644 --- a/src/engine/graphics/vulkan/bloom_system.zig +++ b/src/engine/graphics/vulkan/bloom_system.zig @@ -355,7 +355,7 @@ pub const BloomSystem = struct { var image_info_prev = c.VkDescriptorImageInfo{ .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - .imageView = self.mip_views[target_mip], + .imageView = self.mip_views[src_mip], .sampler = self.sampler, }; @@ -385,6 +385,132 @@ pub const BloomSystem = struct { } } + pub fn compute( + self: *const BloomSystem, + command_buffer: c.VkCommandBuffer, + frame: usize, + hdr_image: c.VkImage, + hdr_extent: c.VkExtent2D, + draw_call_count: *u32, + ) void { + if (!self.enabled) return; + if (self.downsample_pipeline == null) return; + if (self.upsample_pipeline == null) return; + if (self.render_pass == null) return; + if (hdr_image == null) return; + + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.image = hdr_image; + barrier.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 }; + + c.vkCmdPipelineBarrier(command_buffer, c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, self.downsample_pipeline); + + for (0..BLOOM_MIP_COUNT) |i| { + const mip_width = 
self.mip_widths[i]; + const mip_height = self.mip_heights[i]; + + var clear_value = std.mem.zeroes(c.VkClearValue); + clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; + + var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); + rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + rp_begin.renderPass = self.render_pass; + rp_begin.framebuffer = self.mip_framebuffers[i]; + rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; + rp_begin.clearValueCount = 1; + rp_begin.pClearValues = &clear_value; + + c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); + + const viewport = c.VkViewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(mip_width), + .height = @floatFromInt(mip_height), + .minDepth = 0.0, + .maxDepth = 1.0, + }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipeline_layout, 0, 1, &self.descriptor_sets[frame][i], 0, null); + + const src_width: f32 = if (i == 0) @floatFromInt(hdr_extent.width) else @floatFromInt(self.mip_widths[i - 1]); + const src_height: f32 = if (i == 0) @floatFromInt(hdr_extent.height) else @floatFromInt(self.mip_heights[i - 1]); + + const push = BloomPushConstants{ + .texel_size = .{ 1.0 / src_width, 1.0 / src_height }, + .threshold_or_radius = if (i == 0) self.threshold else 0.0, + .soft_threshold_or_intensity = 0.5, + .mip_level = @intCast(i), + }; + c.vkCmdPushConstants(command_buffer, self.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(BloomPushConstants), &push); + + c.vkCmdDraw(command_buffer, 3, 1, 0, 0); + draw_call_count.* += 1; + + c.vkCmdEndRenderPass(command_buffer); + } + + c.vkCmdBindPipeline(command_buffer, 
c.VK_PIPELINE_BIND_POINT_GRAPHICS, self.upsample_pipeline); + + for (0..BLOOM_MIP_COUNT - 1) |pass| { + const target_mip = (BLOOM_MIP_COUNT - 2) - pass; + const mip_width = self.mip_widths[target_mip]; + const mip_height = self.mip_heights[target_mip]; + + var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); + rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + rp_begin.renderPass = self.render_pass; + rp_begin.framebuffer = self.mip_framebuffers[target_mip]; + rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; + rp_begin.clearValueCount = 0; + + c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); + + const viewport = c.VkViewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(mip_width), + .height = @floatFromInt(mip_height), + .minDepth = 0.0, + .maxDepth = 1.0, + }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = mip_width, .height = mip_height } }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipeline_layout, 0, 1, &self.descriptor_sets[frame][BLOOM_MIP_COUNT + pass], 0, null); + + const src_mip = target_mip + 1; + const src_width: f32 = @floatFromInt(self.mip_widths[src_mip]); + const src_height: f32 = @floatFromInt(self.mip_heights[src_mip]); + + const push = BloomPushConstants{ + .texel_size = .{ 1.0 / src_width, 1.0 / src_height }, + .threshold_or_radius = 1.0, + .soft_threshold_or_intensity = self.intensity, + .mip_level = 0, + }; + c.vkCmdPushConstants(command_buffer, self.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(BloomPushConstants), &push); + + c.vkCmdDraw(command_buffer, 3, 1, 0, 0); + draw_call_count.* += 1; + + c.vkCmdEndRenderPass(command_buffer); + } + } + pub fn deinit(self: *BloomSystem, device: c.VkDevice, _: 
Allocator, descriptor_pool: c.VkDescriptorPool) void { if (self.downsample_pipeline != null) { c.vkDestroyPipeline(device, self.downsample_pipeline, null); diff --git a/src/engine/graphics/vulkan/descriptor_bindings.zig b/src/engine/graphics/vulkan/descriptor_bindings.zig new file mode 100644 index 00000000..e777bea6 --- /dev/null +++ b/src/engine/graphics/vulkan/descriptor_bindings.zig @@ -0,0 +1,14 @@ +pub const GLOBAL_UBO = 0; +pub const ALBEDO_TEXTURE = 1; +pub const SHADOW_UBO = 2; +pub const SHADOW_COMPARE_TEXTURE = 3; +pub const SHADOW_REGULAR_TEXTURE = 4; +pub const INSTANCE_SSBO = 5; +pub const NORMAL_TEXTURE = 6; +pub const ROUGHNESS_TEXTURE = 7; +pub const DISPLACEMENT_TEXTURE = 8; +pub const ENV_TEXTURE = 9; +pub const SSAO_TEXTURE = 10; +pub const LPV_TEXTURE = 11; // LPV SH Red channel (rgba32f = 4 SH coefficients) +pub const LPV_TEXTURE_G = 12; // LPV SH Green channel +pub const LPV_TEXTURE_B = 13; // LPV SH Blue channel diff --git a/src/engine/graphics/vulkan/descriptor_manager.zig b/src/engine/graphics/vulkan/descriptor_manager.zig index 4f856643..06c04bc2 100644 --- a/src/engine/graphics/vulkan/descriptor_manager.zig +++ b/src/engine/graphics/vulkan/descriptor_manager.zig @@ -22,12 +22,15 @@ const GlobalUniforms = extern struct { pbr_params: [4]f32, volumetric_params: [4]f32, viewport_size: [4]f32, + lpv_params: [4]f32, + lpv_origin: [4]f32, }; const ShadowUniforms = extern struct { light_space_matrices: [rhi.SHADOW_CASCADE_COUNT]Mat4, cascade_splits: [4]f32, shadow_texel_sizes: [4]f32, + shadow_params: [4]f32, // x = light_size (PCSS), y/z/w reserved }; pub const DescriptorManager = struct { @@ -48,6 +51,7 @@ pub const DescriptorManager = struct { // Dummy textures dummy_texture: rhi.TextureHandle, + dummy_texture_3d: rhi.TextureHandle, dummy_normal_texture: rhi.TextureHandle, dummy_roughness_texture: rhi.TextureHandle, @@ -65,6 +69,7 @@ pub const DescriptorManager = struct { .shadow_ubos = std.mem.zeroes([rhi.MAX_FRAMES_IN_FLIGHT]VulkanBuffer), 
.shadow_ubos_mapped = std.mem.zeroes([rhi.MAX_FRAMES_IN_FLIGHT]?*anyopaque), .dummy_texture = 0, + .dummy_texture_3d = 0, .dummy_normal_texture = 0, .dummy_roughness_texture = 0, }; @@ -105,15 +110,24 @@ pub const DescriptorManager = struct { return err; }; + // 1x1x1 3D dummy texture for sampler3D bindings (LPV). + // Uses rgba32f to match LPV texture format, with zero data (no SH contribution). + const zero_pixel = [_]u8{0} ** 16; // 4 x f32 = 16 bytes, all zero + self.dummy_texture_3d = resource_manager.createTexture3D(1, 1, 1, .rgba32f, .{}, &zero_pixel) catch |err| { + self.deinit(); + return err; + }; + resource_manager.flushTransfer() catch |err| { self.deinit(); return err; }; // Create Descriptor Pool + // Increased sizes to accommodate UI texture descriptor sets (128) + FXAA (2) + Bloom (20) + main (4) var pool_sizes = [_]c.VkDescriptorPoolSize{ .{ .type = c.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 500 }, - .{ .type = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 500 }, + .{ .type = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1000 }, .{ .type = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 100 }, }; @@ -121,7 +135,7 @@ pub const DescriptorManager = struct { pool_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info.poolSizeCount = pool_sizes.len; pool_info.pPoolSizes = &pool_sizes[0]; - pool_info.maxSets = 500; + pool_info.maxSets = 1000; pool_info.flags = c.VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; Utils.checkVk(c.vkCreateDescriptorPool(vulkan_device.vk_device, &pool_info, null, &self.descriptor_pool)) catch |err| { @@ -153,6 +167,12 @@ pub const DescriptorManager = struct { .{ .binding = 9, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, // 10: SSAO Map .{ .binding = 10, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = 
c.VK_SHADER_STAGE_FRAGMENT_BIT }, + // 11: LPV SH Red channel (or scalar RGB when SH disabled) + .{ .binding = 11, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + // 12: LPV SH Green channel + .{ .binding = 12, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + // 13: LPV SH Blue channel + .{ .binding = 13, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, }; var layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); diff --git a/src/engine/graphics/vulkan/device.zig b/src/engine/graphics/vulkan/device.zig new file mode 100644 index 00000000..cefdb7b5 --- /dev/null +++ b/src/engine/graphics/vulkan/device.zig @@ -0,0 +1,3 @@ +const VulkanDeviceImpl = @import("../vulkan_device.zig"); + +pub const VulkanDevice = VulkanDeviceImpl.VulkanDevice; diff --git a/src/engine/graphics/vulkan/pipeline_manager.zig b/src/engine/graphics/vulkan/pipeline_manager.zig new file mode 100644 index 00000000..43aface1 --- /dev/null +++ b/src/engine/graphics/vulkan/pipeline_manager.zig @@ -0,0 +1,540 @@ +//! Pipeline Manager - Handles all Vulkan pipeline creation and management +//! +//! Extracted from rhi_vulkan.zig to eliminate the god object anti-pattern. +//! This module is responsible for: +//! - Creating and destroying graphics pipelines +//! - Managing pipeline layouts +//! 
- Handling pipeline state for different rendering modes + +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const VulkanDevice = @import("../vulkan_device.zig").VulkanDevice; +const DescriptorManager = @import("descriptor_manager.zig").DescriptorManager; +const Utils = @import("utils.zig"); +const shader_registry = @import("shader_registry.zig"); +const pipeline_specialized = @import("pipeline_specialized.zig"); +const build_options = @import("build_options"); +const Mat4 = @import("../../math/mat4.zig").Mat4; + +/// Maximum number of frames in flight +const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT; + +/// Push constant sizes for different pipeline types +const PUSH_CONSTANT_SIZE_MODEL: u32 = 256; // mat4 model + vec3 color + float mask +const PUSH_CONSTANT_SIZE_SKY: u32 = 128; // mat4 view_proj + vec4 params +const PUSH_CONSTANT_SIZE_UI: u32 = @sizeOf(Mat4); // Orthographic projection matrix +const MAX_SHADER_MODULE_BYTES: usize = 4 * 1024 * 1024; + +/// Pipeline manager handles all pipeline-related resources +pub const PipelineManager = struct { + // Main pipelines + terrain_pipeline: c.VkPipeline = null, + wireframe_pipeline: c.VkPipeline = null, + selection_pipeline: c.VkPipeline = null, + line_pipeline: c.VkPipeline = null, + g_pipeline: c.VkPipeline = null, + sky_pipeline: c.VkPipeline = null, + ui_pipeline: c.VkPipeline = null, + ui_tex_pipeline: c.VkPipeline = null, + cloud_pipeline: c.VkPipeline = null, + + // Swapchain UI pipelines + ui_swapchain_pipeline: c.VkPipeline = null, + ui_swapchain_tex_pipeline: c.VkPipeline = null, + + // Pipeline layouts + pipeline_layout: c.VkPipelineLayout = null, + sky_pipeline_layout: c.VkPipelineLayout = null, + ui_pipeline_layout: c.VkPipelineLayout = null, + ui_tex_pipeline_layout: c.VkPipelineLayout = null, + cloud_pipeline_layout: c.VkPipelineLayout = null, + ui_tex_descriptor_set_layout: c.VkDescriptorSetLayout = null, + + // Debug shadow pipeline 
(conditional) + debug_shadow_pipeline: ?c.VkPipeline = null, + debug_shadow_pipeline_layout: ?c.VkPipelineLayout = null, + debug_shadow_descriptor_set_layout: ?c.VkDescriptorSetLayout = null, + + /// Initialize the pipeline manager and create all pipeline layouts + pub fn init( + device: *const VulkanDevice, + descriptor_manager: *const DescriptorManager, + debug_shadow_layout: ?c.VkDescriptorSetLayout, + ) !PipelineManager { + var manager: PipelineManager = .{}; + + try manager.createPipelineLayouts(device, descriptor_manager, debug_shadow_layout); + + return manager; + } + + /// Deinitialize and destroy all pipelines and layouts + pub fn deinit(self: *PipelineManager, vk_device: c.VkDevice) void { + self.destroyPipelines(vk_device); + self.destroyPipelineLayouts(vk_device); + } + + /// Load shader from file and create shader module + /// Caller must destroy the returned module with vkDestroyShaderModule + fn loadShaderModule( + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + path: []const u8, + ) !c.VkShaderModule { + const code = try std.fs.cwd().readFileAlloc(path, allocator, @enumFromInt(MAX_SHADER_MODULE_BYTES)); + defer allocator.free(code); + return try Utils.createShaderModule(vk_device, code); + } + + /// Load vertex and fragment shader pair + /// Returns both modules - caller must destroy both + fn loadShaderPair( + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + vert_path: []const u8, + frag_path: []const u8, + ) !struct { vert: c.VkShaderModule, frag: c.VkShaderModule } { + const vert = try loadShaderModule(allocator, vk_device, vert_path); + errdefer c.vkDestroyShaderModule(vk_device, vert, null); + const frag = try loadShaderModule(allocator, vk_device, frag_path); + return .{ .vert = vert, .frag = frag }; + } + + /// Create all pipeline layouts + fn createPipelineLayouts( + self: *PipelineManager, + device: *const VulkanDevice, + descriptor_manager: *const DescriptorManager, + debug_shadow_layout: ?c.VkDescriptorSetLayout, + ) 
!void { + const vk_device = device.vk_device; + + // Main pipeline layout with model push constants + var model_push_constant = std.mem.zeroes(c.VkPushConstantRange); + model_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT; + model_push_constant.size = PUSH_CONSTANT_SIZE_MODEL; + + var pipeline_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + pipeline_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pipeline_layout_info.setLayoutCount = 1; + pipeline_layout_info.pSetLayouts = &descriptor_manager.descriptor_set_layout; + pipeline_layout_info.pushConstantRangeCount = 1; + pipeline_layout_info.pPushConstantRanges = &model_push_constant; + + try Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &pipeline_layout_info, null, &self.pipeline_layout)); + + // Sky pipeline layout + var sky_push_constant = std.mem.zeroes(c.VkPushConstantRange); + sky_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT; + sky_push_constant.size = PUSH_CONSTANT_SIZE_SKY; + + var sky_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + sky_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + sky_layout_info.setLayoutCount = 1; + sky_layout_info.pSetLayouts = &descriptor_manager.descriptor_set_layout; + sky_layout_info.pushConstantRangeCount = 1; + sky_layout_info.pPushConstantRanges = &sky_push_constant; + + try Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &sky_layout_info, null, &self.sky_pipeline_layout)); + + // UI pipeline layout + var ui_push_constant = std.mem.zeroes(c.VkPushConstantRange); + ui_push_constant.stageFlags = c.VK_SHADER_STAGE_VERTEX_BIT; + ui_push_constant.size = @sizeOf(Mat4); + + var ui_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + ui_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + ui_layout_info.pushConstantRangeCount = 1; + ui_layout_info.pPushConstantRanges = &ui_push_constant; + + try 
Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &ui_layout_info, null, &self.ui_pipeline_layout)); + + // UI texture descriptor set layout + var ui_tex_layout_bindings = [_]c.VkDescriptorSetLayoutBinding{ + .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + }; + + var ui_tex_layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); + ui_tex_layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + ui_tex_layout_info.bindingCount = 1; + ui_tex_layout_info.pBindings = &ui_tex_layout_bindings[0]; + + try Utils.checkVk(c.vkCreateDescriptorSetLayout(vk_device, &ui_tex_layout_info, null, &self.ui_tex_descriptor_set_layout)); + + // UI texture pipeline layout + var ui_tex_layout_full_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + ui_tex_layout_full_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + ui_tex_layout_full_info.setLayoutCount = 1; + ui_tex_layout_full_info.pSetLayouts = &self.ui_tex_descriptor_set_layout; + ui_tex_layout_full_info.pushConstantRangeCount = 1; + ui_tex_layout_full_info.pPushConstantRanges = &ui_push_constant; + + try Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &ui_tex_layout_full_info, null, &self.ui_tex_pipeline_layout)); + + // Debug shadow pipeline layout + if (comptime build_options.debug_shadows) { + if (debug_shadow_layout) |layout| { + self.debug_shadow_descriptor_set_layout = layout; + + var debug_shadow_layout_full_info: c.VkPipelineLayoutCreateInfo = undefined; + @memset(std.mem.asBytes(&debug_shadow_layout_full_info), 0); + debug_shadow_layout_full_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + debug_shadow_layout_full_info.setLayoutCount = 1; + debug_shadow_layout_full_info.pSetLayouts = &layout; + debug_shadow_layout_full_info.pushConstantRangeCount = 1; + debug_shadow_layout_full_info.pPushConstantRanges = &ui_push_constant; + + try 
Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &debug_shadow_layout_full_info, null, &self.debug_shadow_pipeline_layout.?)); + } + } + + // Cloud pipeline layout + var cloud_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + cloud_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + cloud_layout_info.pushConstantRangeCount = 1; + cloud_layout_info.pPushConstantRanges = &sky_push_constant; + + try Utils.checkVk(c.vkCreatePipelineLayout(vk_device, &cloud_layout_info, null, &self.cloud_pipeline_layout)); + } + + /// Destroy all pipeline layouts + fn destroyPipelineLayouts(self: *PipelineManager, vk_device: c.VkDevice) void { + if (self.pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + if (self.sky_pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + if (self.ui_pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + if (self.ui_tex_pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + if (self.ui_tex_descriptor_set_layout) |layout| c.vkDestroyDescriptorSetLayout(vk_device, layout, null); + if (self.cloud_pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + + if (comptime build_options.debug_shadows) { + if (self.debug_shadow_pipeline_layout) |layout| c.vkDestroyPipelineLayout(vk_device, layout, null); + if (self.debug_shadow_descriptor_set_layout) |layout| c.vkDestroyDescriptorSetLayout(vk_device, layout, null); + } + } + + /// Destroy all pipelines (but not layouts) + pub fn destroyPipelines(self: *PipelineManager, vk_device: c.VkDevice) void { + if (self.terrain_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.wireframe_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.selection_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.line_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.g_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + 
if (self.sky_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.ui_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.ui_tex_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.cloud_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.ui_swapchain_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.ui_swapchain_tex_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + + if (comptime build_options.debug_shadows) { + if (self.debug_shadow_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + } + + self.terrain_pipeline = null; + self.wireframe_pipeline = null; + self.selection_pipeline = null; + self.line_pipeline = null; + self.g_pipeline = null; + self.sky_pipeline = null; + self.ui_pipeline = null; + self.ui_tex_pipeline = null; + self.cloud_pipeline = null; + self.ui_swapchain_pipeline = null; + self.ui_swapchain_tex_pipeline = null; + self.debug_shadow_pipeline = null; + } + + /// Create all main rendering pipelines + pub fn createMainPipelines( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + g_render_pass: c.VkRenderPass, + msaa_samples: u8, + ) !void { + // Validate required render passes + if (hdr_render_pass == null) return error.InvalidRenderPass; + + // Destroy existing pipelines first + self.destroyPipelines(vk_device); + + // Setup rollback on failure - destroy any created pipelines if we fail partway + errdefer self.destroyPipelines(vk_device); + + const sample_count = getMSAASampleCountFlag(msaa_samples); + + // Common pipeline state + var viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo); + viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_state.viewportCount = 1; + viewport_state.scissorCount = 1; + + const dynamic_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR }; + var dynamic_state = 
std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); + dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_state.dynamicStateCount = 2; + dynamic_state.pDynamicStates = &dynamic_states; + + var input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); + input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + var rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); + rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizer.lineWidth = 1.0; + rasterizer.cullMode = c.VK_CULL_MODE_NONE; + rasterizer.frontFace = c.VK_FRONT_FACE_CLOCKWISE; + + var multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); + multisampling.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisampling.rasterizationSamples = sample_count; + + var depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo); + depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + depth_stencil.depthTestEnable = c.VK_TRUE; + depth_stencil.depthWriteEnable = c.VK_TRUE; + depth_stencil.depthCompareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL; + + var color_blend_attachment = std.mem.zeroes(c.VkPipelineColorBlendAttachmentState); + color_blend_attachment.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; + + var ui_color_blend_attachment = color_blend_attachment; + ui_color_blend_attachment.blendEnable = c.VK_TRUE; + ui_color_blend_attachment.srcColorBlendFactor = c.VK_BLEND_FACTOR_SRC_ALPHA; + ui_color_blend_attachment.dstColorBlendFactor = c.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + ui_color_blend_attachment.colorBlendOp = c.VK_BLEND_OP_ADD; + ui_color_blend_attachment.srcAlphaBlendFactor = c.VK_BLEND_FACTOR_ONE; + ui_color_blend_attachment.dstAlphaBlendFactor 
= c.VK_BLEND_FACTOR_ZERO; + ui_color_blend_attachment.alphaBlendOp = c.VK_BLEND_OP_ADD; + + var ui_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); + ui_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + ui_color_blending.attachmentCount = 1; + ui_color_blending.pAttachments = &ui_color_blend_attachment; + + var terrain_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); + terrain_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + terrain_color_blending.attachmentCount = 1; + terrain_color_blending.pAttachments = &color_blend_attachment; + + // Terrain Pipeline + try self.createTerrainPipeline(allocator, vk_device, hdr_render_pass, &viewport_state, &dynamic_state, &input_assembly, &rasterizer, &multisampling, &depth_stencil, &terrain_color_blending, sample_count, g_render_pass); + + // Sky Pipeline + try self.createSkyPipeline(allocator, vk_device, hdr_render_pass, &viewport_state, &dynamic_state, &input_assembly, &rasterizer, &multisampling, &depth_stencil, &terrain_color_blending); + + // UI Pipelines + try self.createUIPipelines(allocator, vk_device, hdr_render_pass, &viewport_state, &dynamic_state, &input_assembly, &rasterizer, &multisampling, &depth_stencil, &ui_color_blending); + + // Debug Shadow Pipeline + if (comptime build_options.debug_shadows) { + if (self.debug_shadow_pipeline_layout != null) { + try self.createDebugShadowPipeline(allocator, vk_device, hdr_render_pass, &viewport_state, &dynamic_state, &input_assembly, &rasterizer, &multisampling, &depth_stencil, &ui_color_blending); + } + } + + // Cloud Pipeline + try self.createCloudPipeline(allocator, vk_device, hdr_render_pass, &viewport_state, &dynamic_state, &input_assembly, &rasterizer, &multisampling, &depth_stencil, &ui_color_blending); + } + + /// Create terrain pipeline and variants + fn createTerrainPipeline( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: 
c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, + _sample_count: c.VkSampleCountFlagBits, + g_render_pass: c.VkRenderPass, + ) !void { + try pipeline_specialized.createTerrainPipeline(self, allocator, vk_device, hdr_render_pass, viewport_state, dynamic_state, input_assembly, rasterizer, multisampling, depth_stencil, color_blending, _sample_count, g_render_pass); + } + + /// Create sky pipeline + fn createSkyPipeline( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, + ) !void { + var sky_rasterizer = rasterizer.*; + sky_rasterizer.cullMode = c.VK_CULL_MODE_NONE; + + const shaders = try loadShaderPair(allocator, vk_device, shader_registry.SKY_VERT, shader_registry.SKY_FRAG); + defer c.vkDestroyShaderModule(vk_device, shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, shaders.frag, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = shaders.vert, .pName = "main" }, + .{ .sType = 
c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = shaders.frag, .pName = "main" }, + }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + + var sky_depth_stencil = depth_stencil.*; + sky_depth_stencil.depthWriteEnable = c.VK_FALSE; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = input_assembly; + pipeline_info.pViewportState = viewport_state; + pipeline_info.pRasterizationState = &sky_rasterizer; + pipeline_info.pMultisampleState = multisampling; + pipeline_info.pDepthStencilState = &sky_depth_stencil; + pipeline_info.pColorBlendState = color_blending; + pipeline_info.pDynamicState = dynamic_state; + pipeline_info.layout = self.sky_pipeline_layout; + pipeline_info.renderPass = hdr_render_pass; + pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.sky_pipeline)); + } + + /// Create UI pipelines + fn createUIPipelines( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, + ) !void { + // UI vertex format: position (2 floats) + color (4 floats) = 6 
floats per vertex + const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 6 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; + + var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; + attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; + attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 2 * 4 }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = 1; + vertex_input_info.pVertexBindingDescriptions = &binding_description; + vertex_input_info.vertexAttributeDescriptionCount = 2; + vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; + + var ui_depth_stencil = depth_stencil.*; + ui_depth_stencil.depthTestEnable = c.VK_FALSE; + ui_depth_stencil.depthWriteEnable = c.VK_FALSE; + + // Colored UI pipeline + const ui_shaders = try loadShaderPair(allocator, vk_device, shader_registry.UI_VERT, shader_registry.UI_FRAG); + defer c.vkDestroyShaderModule(vk_device, ui_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, ui_shaders.frag, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = ui_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = ui_shaders.frag, .pName = "main" }, + }; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = 
&vertex_input_info; + pipeline_info.pInputAssemblyState = input_assembly; + pipeline_info.pViewportState = viewport_state; + pipeline_info.pRasterizationState = rasterizer; + pipeline_info.pMultisampleState = multisampling; + pipeline_info.pDepthStencilState = &ui_depth_stencil; + pipeline_info.pColorBlendState = color_blending; + pipeline_info.pDynamicState = dynamic_state; + pipeline_info.layout = self.ui_pipeline_layout; + pipeline_info.renderPass = hdr_render_pass; + pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.ui_pipeline)); + + // Textured UI pipeline + const tex_ui_shaders = try loadShaderPair(allocator, vk_device, shader_registry.UI_TEX_VERT, shader_registry.UI_TEX_FRAG); + defer c.vkDestroyShaderModule(vk_device, tex_ui_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, tex_ui_shaders.frag, null); + + var tex_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = tex_ui_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = tex_ui_shaders.frag, .pName = "main" }, + }; + + pipeline_info.pStages = &tex_shader_stages[0]; + pipeline_info.layout = self.ui_tex_pipeline_layout; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.ui_tex_pipeline)); + } + + /// Create swapchain UI pipelines + pub fn createSwapchainUIPipelines( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + ui_swapchain_render_pass: c.VkRenderPass, + ) !void { + try pipeline_specialized.createSwapchainUIPipelines(self, allocator, vk_device, ui_swapchain_render_pass); + } + + /// Create debug shadow pipeline + fn createDebugShadowPipeline( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: 
c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, + ) !void { + try pipeline_specialized.createDebugShadowPipeline(self, allocator, vk_device, hdr_render_pass, viewport_state, dynamic_state, input_assembly, rasterizer, multisampling, depth_stencil, color_blending); + } + + /// Create cloud pipeline + fn createCloudPipeline( + self: *PipelineManager, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, + ) !void { + try pipeline_specialized.createCloudPipeline(self, allocator, vk_device, hdr_render_pass, viewport_state, dynamic_state, input_assembly, rasterizer, multisampling, depth_stencil, color_blending); + } +}; + +/// Converts MSAA sample count (1, 2, 4, 8) to Vulkan sample count flag. 
+fn getMSAASampleCountFlag(samples: u8) c.VkSampleCountFlagBits { + return switch (samples) { + 2 => c.VK_SAMPLE_COUNT_2_BIT, + 4 => c.VK_SAMPLE_COUNT_4_BIT, + 8 => c.VK_SAMPLE_COUNT_8_BIT, + else => c.VK_SAMPLE_COUNT_1_BIT, + }; +} diff --git a/src/engine/graphics/vulkan/pipeline_specialized.zig b/src/engine/graphics/vulkan/pipeline_specialized.zig new file mode 100644 index 00000000..002118af --- /dev/null +++ b/src/engine/graphics/vulkan/pipeline_specialized.zig @@ -0,0 +1,378 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Utils = @import("utils.zig"); +const shader_registry = @import("shader_registry.zig"); + +fn loadShaderModule( + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + path: []const u8, +) !c.VkShaderModule { + const code = try std.fs.cwd().readFileAlloc(path, allocator, @enumFromInt(1024 * 1024)); + defer allocator.free(code); + return try Utils.createShaderModule(vk_device, code); +} + +fn loadShaderPair( + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + vert_path: []const u8, + frag_path: []const u8, +) !struct { vert: c.VkShaderModule, frag: c.VkShaderModule } { + const vert = try loadShaderModule(allocator, vk_device, vert_path); + errdefer c.vkDestroyShaderModule(vk_device, vert, null); + const frag = try loadShaderModule(allocator, vk_device, frag_path); + return .{ .vert = vert, .frag = frag }; +} + +pub fn createTerrainPipeline( + self: anytype, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const 
c.VkPipelineColorBlendStateCreateInfo, + _sample_count: c.VkSampleCountFlagBits, + g_render_pass: c.VkRenderPass, +) !void { + _ = _sample_count; + const vert_module = try loadShaderModule(allocator, vk_device, shader_registry.TERRAIN_VERT); + defer c.vkDestroyShaderModule(vk_device, vert_module, null); + const frag_module = try loadShaderModule(allocator, vk_device, shader_registry.TERRAIN_FRAG); + defer c.vkDestroyShaderModule(vk_device, frag_module, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, + }; + + const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(rhi.Vertex), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; + + var attribute_descriptions: [8]c.VkVertexInputAttributeDescription = undefined; + attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 0 }; + attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 3 * 4 }; + attribute_descriptions[2] = .{ .binding = 0, .location = 2, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 6 * 4 }; + attribute_descriptions[3] = .{ .binding = 0, .location = 3, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 9 * 4 }; + attribute_descriptions[4] = .{ .binding = 0, .location = 4, .format = c.VK_FORMAT_R32_SFLOAT, .offset = 11 * 4 }; + attribute_descriptions[5] = .{ .binding = 0, .location = 5, .format = c.VK_FORMAT_R32_SFLOAT, .offset = 12 * 4 }; + attribute_descriptions[6] = .{ .binding = 0, .location = 6, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 13 * 4 }; + attribute_descriptions[7] = .{ .binding = 0, .location = 7, .format = 
c.VK_FORMAT_R32_SFLOAT, .offset = 16 * 4 }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = 1; + vertex_input_info.pVertexBindingDescriptions = &binding_description; + vertex_input_info.vertexAttributeDescriptionCount = 8; + vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = input_assembly; + pipeline_info.pViewportState = viewport_state; + pipeline_info.pRasterizationState = rasterizer; + pipeline_info.pMultisampleState = multisampling; + pipeline_info.pDepthStencilState = depth_stencil; + pipeline_info.pColorBlendState = color_blending; + pipeline_info.pDynamicState = dynamic_state; + pipeline_info.layout = self.pipeline_layout; + pipeline_info.renderPass = hdr_render_pass; + pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.terrain_pipeline)); + + var wireframe_rasterizer = rasterizer.*; + wireframe_rasterizer.cullMode = c.VK_CULL_MODE_NONE; + wireframe_rasterizer.polygonMode = c.VK_POLYGON_MODE_LINE; + pipeline_info.pRasterizationState = &wireframe_rasterizer; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.wireframe_pipeline)); + + var selection_rasterizer = rasterizer.*; + selection_rasterizer.cullMode = c.VK_CULL_MODE_NONE; + selection_rasterizer.polygonMode = c.VK_POLYGON_MODE_FILL; + var selection_pipeline_info = pipeline_info; + selection_pipeline_info.pRasterizationState = &selection_rasterizer; + try 
Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &selection_pipeline_info, null, &self.selection_pipeline)); + + var line_input_assembly = input_assembly.*; + line_input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_LINE_LIST; + var line_pipeline_info = pipeline_info; + line_pipeline_info.pInputAssemblyState = &line_input_assembly; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &line_pipeline_info, null, &self.line_pipeline)); + + if (g_render_pass != null) { + const g_frag_module = try loadShaderModule(allocator, vk_device, shader_registry.G_PASS_FRAG); + defer c.vkDestroyShaderModule(vk_device, g_frag_module, null); + + var g_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = g_frag_module, .pName = "main" }, + }; + + var g_color_blend_attachments = [_]c.VkPipelineColorBlendAttachmentState{ + std.mem.zeroes(c.VkPipelineColorBlendAttachmentState), + std.mem.zeroes(c.VkPipelineColorBlendAttachmentState), + }; + g_color_blend_attachments[0].colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; + g_color_blend_attachments[1].colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; + + var g_color_blending = color_blending.*; + g_color_blending.attachmentCount = 2; + g_color_blending.pAttachments = &g_color_blend_attachments[0]; + + var g_multisampling = multisampling.*; + g_multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; + + var g_pipeline_info = pipeline_info; + g_pipeline_info.stageCount = 2; + g_pipeline_info.pStages = &g_shader_stages[0]; + g_pipeline_info.pMultisampleState = &g_multisampling; + 
g_pipeline_info.pColorBlendState = &g_color_blending; + g_pipeline_info.renderPass = g_render_pass; + g_pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &g_pipeline_info, null, &self.g_pipeline)); + } +} + +pub fn createSwapchainUIPipelines( + self: anytype, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + ui_swapchain_render_pass: c.VkRenderPass, +) !void { + if (ui_swapchain_render_pass == null) return error.InitializationFailed; + + if (self.ui_swapchain_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + if (self.ui_swapchain_tex_pipeline) |p| c.vkDestroyPipeline(vk_device, p, null); + self.ui_swapchain_pipeline = null; + self.ui_swapchain_tex_pipeline = null; + + var viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo); + viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_state.viewportCount = 1; + viewport_state.scissorCount = 1; + + const dynamic_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR }; + var dynamic_state = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); + dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_state.dynamicStateCount = 2; + dynamic_state.pDynamicStates = &dynamic_states; + + var input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); + input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + var rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); + rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizer.lineWidth = 1.0; + rasterizer.cullMode = c.VK_CULL_MODE_NONE; + rasterizer.frontFace = c.VK_FRONT_FACE_CLOCKWISE; + + var multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); + multisampling.sType = 
c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; + + var depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo); + depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + depth_stencil.depthTestEnable = c.VK_FALSE; + depth_stencil.depthWriteEnable = c.VK_FALSE; + + var ui_color_blend_attachment = std.mem.zeroes(c.VkPipelineColorBlendAttachmentState); + ui_color_blend_attachment.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; + ui_color_blend_attachment.blendEnable = c.VK_TRUE; + ui_color_blend_attachment.srcColorBlendFactor = c.VK_BLEND_FACTOR_SRC_ALPHA; + ui_color_blend_attachment.dstColorBlendFactor = c.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + ui_color_blend_attachment.colorBlendOp = c.VK_BLEND_OP_ADD; + ui_color_blend_attachment.srcAlphaBlendFactor = c.VK_BLEND_FACTOR_ONE; + ui_color_blend_attachment.dstAlphaBlendFactor = c.VK_BLEND_FACTOR_ZERO; + ui_color_blend_attachment.alphaBlendOp = c.VK_BLEND_OP_ADD; + + var ui_color_blending = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); + ui_color_blending.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + ui_color_blending.attachmentCount = 1; + ui_color_blending.pAttachments = &ui_color_blend_attachment; + + const swapchain_ui_shaders = try loadShaderPair(allocator, vk_device, shader_registry.UI_VERT, shader_registry.UI_FRAG); + defer c.vkDestroyShaderModule(vk_device, swapchain_ui_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, swapchain_ui_shaders.frag, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = swapchain_ui_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = 
c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = swapchain_ui_shaders.frag, .pName = "main" }, + }; + + const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 6 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; + + var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; + attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; + attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 2 * 4 }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = 1; + vertex_input_info.pVertexBindingDescriptions = &binding_description; + vertex_input_info.vertexAttributeDescriptionCount = 2; + vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = &input_assembly; + pipeline_info.pViewportState = &viewport_state; + pipeline_info.pRasterizationState = &rasterizer; + pipeline_info.pMultisampleState = &multisampling; + pipeline_info.pDepthStencilState = &depth_stencil; + pipeline_info.pColorBlendState = &ui_color_blending; + pipeline_info.pDynamicState = &dynamic_state; + pipeline_info.layout = self.ui_pipeline_layout; + pipeline_info.renderPass = ui_swapchain_render_pass; + pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.ui_swapchain_pipeline)); + + const tex_swapchain_ui_shaders = try loadShaderPair(allocator, vk_device, 
shader_registry.UI_TEX_VERT, shader_registry.UI_TEX_FRAG); + defer c.vkDestroyShaderModule(vk_device, tex_swapchain_ui_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, tex_swapchain_ui_shaders.frag, null); + + var tex_shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = tex_swapchain_ui_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = tex_swapchain_ui_shaders.frag, .pName = "main" }, + }; + + pipeline_info.pStages = &tex_shader_stages[0]; + pipeline_info.layout = self.ui_tex_pipeline_layout; + pipeline_info.renderPass = ui_swapchain_render_pass; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.ui_swapchain_tex_pipeline)); +} + +pub fn createDebugShadowPipeline( + self: anytype, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, +) !void { + const debug_shadow_shaders = try loadShaderPair(allocator, vk_device, shader_registry.DEBUG_SHADOW_VERT, shader_registry.DEBUG_SHADOW_FRAG); + defer c.vkDestroyShaderModule(vk_device, debug_shadow_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, debug_shadow_shaders.frag, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = 
debug_shadow_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = debug_shadow_shaders.frag, .pName = "main" }, + }; + + const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 4 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; + + var attribute_descriptions: [2]c.VkVertexInputAttributeDescription = undefined; + attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; + attribute_descriptions[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 2 * 4 }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = 1; + vertex_input_info.pVertexBindingDescriptions = &binding_description; + vertex_input_info.vertexAttributeDescriptionCount = 2; + vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; + + var ui_depth_stencil = depth_stencil.*; + ui_depth_stencil.depthTestEnable = c.VK_FALSE; + ui_depth_stencil.depthWriteEnable = c.VK_FALSE; + + const layout = self.debug_shadow_pipeline_layout orelse return error.MissingPipelineLayout; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = input_assembly; + pipeline_info.pViewportState = viewport_state; + pipeline_info.pRasterizationState = rasterizer; + pipeline_info.pMultisampleState = multisampling; + pipeline_info.pDepthStencilState = &ui_depth_stencil; + pipeline_info.pColorBlendState = color_blending; + pipeline_info.pDynamicState = dynamic_state; 
+ pipeline_info.layout = layout; + pipeline_info.renderPass = hdr_render_pass; + pipeline_info.subpass = 0; + + var pipeline: c.VkPipeline = null; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &pipeline)); + self.debug_shadow_pipeline = pipeline; +} + +pub fn createCloudPipeline( + self: anytype, + allocator: std.mem.Allocator, + vk_device: c.VkDevice, + hdr_render_pass: c.VkRenderPass, + viewport_state: *const c.VkPipelineViewportStateCreateInfo, + dynamic_state: *const c.VkPipelineDynamicStateCreateInfo, + input_assembly: *const c.VkPipelineInputAssemblyStateCreateInfo, + rasterizer: *const c.VkPipelineRasterizationStateCreateInfo, + multisampling: *const c.VkPipelineMultisampleStateCreateInfo, + depth_stencil: *const c.VkPipelineDepthStencilStateCreateInfo, + color_blending: *const c.VkPipelineColorBlendStateCreateInfo, +) !void { + const cloud_shaders = try loadShaderPair(allocator, vk_device, shader_registry.CLOUD_VERT, shader_registry.CLOUD_FRAG); + defer c.vkDestroyShaderModule(vk_device, cloud_shaders.vert, null); + defer c.vkDestroyShaderModule(vk_device, cloud_shaders.frag, null); + + var shader_stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = cloud_shaders.vert, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = cloud_shaders.frag, .pName = "main" }, + }; + + const binding_description = c.VkVertexInputBindingDescription{ .binding = 0, .stride = 2 * @sizeOf(f32), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX }; + + var attribute_descriptions: [1]c.VkVertexInputAttributeDescription = undefined; + attribute_descriptions[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32_SFLOAT, .offset = 0 }; + + var vertex_input_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vertex_input_info.sType = 
c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = 1; + vertex_input_info.pVertexBindingDescriptions = &binding_description; + vertex_input_info.vertexAttributeDescriptionCount = 1; + vertex_input_info.pVertexAttributeDescriptions = &attribute_descriptions[0]; + + var cloud_depth_stencil = depth_stencil.*; + cloud_depth_stencil.depthWriteEnable = c.VK_FALSE; + + var cloud_rasterizer = rasterizer.*; + cloud_rasterizer.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE; + + var pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = &shader_stages[0]; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = input_assembly; + pipeline_info.pViewportState = viewport_state; + pipeline_info.pRasterizationState = &cloud_rasterizer; + pipeline_info.pMultisampleState = multisampling; + pipeline_info.pDepthStencilState = &cloud_depth_stencil; + pipeline_info.pColorBlendState = color_blending; + pipeline_info.pDynamicState = dynamic_state; + pipeline_info.layout = self.cloud_pipeline_layout; + pipeline_info.renderPass = hdr_render_pass; + pipeline_info.subpass = 0; + + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk_device, null, 1, &pipeline_info, null, &self.cloud_pipeline)); +} diff --git a/src/engine/graphics/vulkan/post_process_system.zig b/src/engine/graphics/vulkan/post_process_system.zig new file mode 100644 index 00000000..36b3eaaa --- /dev/null +++ b/src/engine/graphics/vulkan/post_process_system.zig @@ -0,0 +1,250 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Utils = @import("utils.zig"); +const shader_registry = @import("shader_registry.zig"); +const VulkanBuffer = @import("resource_manager.zig").VulkanBuffer; + +pub const PostProcessPushConstants = extern struct { 
+ bloom_enabled: f32, + bloom_intensity: f32, + vignette_intensity: f32, + film_grain_intensity: f32, + color_grading_enabled: f32, // 0.0 = disabled, 1.0 = enabled + color_grading_intensity: f32, // LUT blend intensity (0.0 = original, 1.0 = full LUT) + _pad0: f32 = 0.0, + _pad1: f32 = 0.0, +}; + +pub const PostProcessSystem = struct { + pipeline: c.VkPipeline = null, + pipeline_layout: c.VkPipelineLayout = null, + descriptor_set_layout: c.VkDescriptorSetLayout = null, + descriptor_sets: [rhi.MAX_FRAMES_IN_FLIGHT]c.VkDescriptorSet = .{null} ** rhi.MAX_FRAMES_IN_FLIGHT, + sampler: c.VkSampler = null, + pass_active: bool = false, + lut_texture: rhi.TextureHandle = 0, + + pub fn init( + self: *PostProcessSystem, + vk: c.VkDevice, + allocator: std.mem.Allocator, + descriptor_pool: c.VkDescriptorPool, + render_pass: c.VkRenderPass, + hdr_view: c.VkImageView, + global_ubos: [rhi.MAX_FRAMES_IN_FLIGHT]VulkanBuffer, + global_uniform_size: usize, + ) !void { + if (render_pass == null) return error.RenderPassNotInitialized; + + if (self.descriptor_set_layout == null) { + var bindings = [_]c.VkDescriptorSetLayoutBinding{ + .{ .binding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + .{ .binding = 1, .descriptorType = c.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + .{ .binding = 2, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + .{ .binding = 3, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT }, + }; + var layout_info = std.mem.zeroes(c.VkDescriptorSetLayoutCreateInfo); + layout_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + layout_info.bindingCount = bindings.len; + layout_info.pBindings = &bindings[0]; + try 
Utils.checkVk(c.vkCreateDescriptorSetLayout(vk, &layout_info, null, &self.descriptor_set_layout)); + } + + if (self.pipeline_layout == null) { + var push_constant = std.mem.zeroes(c.VkPushConstantRange); + push_constant.stageFlags = c.VK_SHADER_STAGE_FRAGMENT_BIT; + push_constant.offset = 0; + push_constant.size = @sizeOf(PostProcessPushConstants); + + var pipe_layout_info = std.mem.zeroes(c.VkPipelineLayoutCreateInfo); + pipe_layout_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pipe_layout_info.setLayoutCount = 1; + pipe_layout_info.pSetLayouts = &self.descriptor_set_layout; + pipe_layout_info.pushConstantRangeCount = 1; + pipe_layout_info.pPushConstantRanges = &push_constant; + try Utils.checkVk(c.vkCreatePipelineLayout(vk, &pipe_layout_info, null, &self.pipeline_layout)); + } + + if (self.sampler != null) c.vkDestroySampler(vk, self.sampler, null); + self.sampler = null; + + var sampler_info = std.mem.zeroes(c.VkSamplerCreateInfo); + sampler_info.sType = c.VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; + sampler_info.magFilter = c.VK_FILTER_LINEAR; + sampler_info.minFilter = c.VK_FILTER_LINEAR; + sampler_info.addressModeU = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.addressModeV = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.addressModeW = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.mipmapMode = c.VK_SAMPLER_MIPMAP_MODE_LINEAR; + try Utils.checkVk(c.vkCreateSampler(vk, &sampler_info, null, &self.sampler)); + + if (self.pipeline != null) { + c.vkDestroyPipeline(vk, self.pipeline, null); + self.pipeline = null; + } + + const vert_code = try std.fs.cwd().readFileAlloc(shader_registry.POST_PROCESS_VERT, allocator, @enumFromInt(1024 * 1024)); + defer allocator.free(vert_code); + const frag_code = try std.fs.cwd().readFileAlloc(shader_registry.POST_PROCESS_FRAG, allocator, @enumFromInt(1024 * 1024)); + defer allocator.free(frag_code); + const vert_module = try Utils.createShaderModule(vk, vert_code); + defer 
c.vkDestroyShaderModule(vk, vert_module, null); + const frag_module = try Utils.createShaderModule(vk, frag_code); + defer c.vkDestroyShaderModule(vk, frag_module, null); + + var stages = [_]c.VkPipelineShaderStageCreateInfo{ + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = vert_module, .pName = "main" }, + .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = frag_module, .pName = "main" }, + }; + + var vi_info = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo); + vi_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + var ia_info = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo); + ia_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + ia_info.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + var vp_info = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo); + vp_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + vp_info.viewportCount = 1; + vp_info.scissorCount = 1; + + var rs_info = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo); + rs_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rs_info.lineWidth = 1.0; + rs_info.cullMode = c.VK_CULL_MODE_NONE; + rs_info.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE; + + var ms_info = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo); + ms_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + ms_info.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT; + + var cb_attach = std.mem.zeroes(c.VkPipelineColorBlendAttachmentState); + cb_attach.colorWriteMask = c.VK_COLOR_COMPONENT_R_BIT | c.VK_COLOR_COMPONENT_G_BIT | c.VK_COLOR_COMPONENT_B_BIT | c.VK_COLOR_COMPONENT_A_BIT; + var cb_info = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo); + cb_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + cb_info.attachmentCount = 1; + 
cb_info.pAttachments = &cb_attach; + + var dyn_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR }; + var dyn_info = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo); + dyn_info.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dyn_info.dynamicStateCount = 2; + dyn_info.pDynamicStates = &dyn_states[0]; + + var pipe_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo); + pipe_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipe_info.stageCount = 2; + pipe_info.pStages = &stages[0]; + pipe_info.pVertexInputState = &vi_info; + pipe_info.pInputAssemblyState = &ia_info; + pipe_info.pViewportState = &vp_info; + pipe_info.pRasterizationState = &rs_info; + pipe_info.pMultisampleState = &ms_info; + pipe_info.pColorBlendState = &cb_info; + pipe_info.pDynamicState = &dyn_info; + pipe_info.layout = self.pipeline_layout; + pipe_info.renderPass = render_pass; + try Utils.checkVk(c.vkCreateGraphicsPipelines(vk, null, 1, &pipe_info, null, &self.pipeline)); + + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| { + if (self.descriptor_sets[i] == null) { + var alloc_ds_info = std.mem.zeroes(c.VkDescriptorSetAllocateInfo); + alloc_ds_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + alloc_ds_info.descriptorPool = descriptor_pool; + alloc_ds_info.descriptorSetCount = 1; + alloc_ds_info.pSetLayouts = &self.descriptor_set_layout; + try Utils.checkVk(c.vkAllocateDescriptorSets(vk, &alloc_ds_info, &self.descriptor_sets[i])); + } + + var image_info = std.mem.zeroes(c.VkDescriptorImageInfo); + image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + image_info.imageView = hdr_view; + image_info.sampler = self.sampler; + + var buffer_info = std.mem.zeroes(c.VkDescriptorBufferInfo); + buffer_info.buffer = global_ubos[i].buffer; + buffer_info.offset = 0; + buffer_info.range = global_uniform_size; + + var writes = [_]c.VkWriteDescriptorSet{ + .{ .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, .dstSet = 
self.descriptor_sets[i], .dstBinding = 0, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .pImageInfo = &image_info }, + .{ .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, .dstSet = self.descriptor_sets[i], .dstBinding = 1, .descriptorType = c.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 1, .pBufferInfo = &buffer_info }, + .{ .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, .dstSet = self.descriptor_sets[i], .dstBinding = 2, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .pImageInfo = &image_info }, + .{ .sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, .dstSet = self.descriptor_sets[i], .dstBinding = 3, .descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 1, .pImageInfo = &image_info }, // placeholder for LUT, updated later + }; + c.vkUpdateDescriptorSets(vk, writes.len, &writes[0], 0, null); + } + } + + pub fn updateBloomDescriptors(self: *PostProcessSystem, vk: c.VkDevice, bloom_view: c.VkImageView, bloom_sampler: c.VkSampler) void { + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| { + if (self.descriptor_sets[i] == null) continue; + + var bloom_image_info = std.mem.zeroes(c.VkDescriptorImageInfo); + bloom_image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + bloom_image_info.imageView = bloom_view; + bloom_image_info.sampler = bloom_sampler; + + var write = std.mem.zeroes(c.VkWriteDescriptorSet); + write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write.dstSet = self.descriptor_sets[i]; + write.dstBinding = 2; + write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write.descriptorCount = 1; + write.pImageInfo = &bloom_image_info; + + c.vkUpdateDescriptorSets(vk, 1, &write, 0, null); + } + } + + pub fn updateLUTDescriptor(self: *PostProcessSystem, vk: c.VkDevice, lut_view: c.VkImageView, lut_sampler: c.VkSampler) void { + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| { + if (self.descriptor_sets[i] == null) continue; + + 
var lut_image_info = std.mem.zeroes(c.VkDescriptorImageInfo); + lut_image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + lut_image_info.imageView = lut_view; + lut_image_info.sampler = lut_sampler; + + var write = std.mem.zeroes(c.VkWriteDescriptorSet); + write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write.dstSet = self.descriptor_sets[i]; + write.dstBinding = 3; + write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write.descriptorCount = 1; + write.pImageInfo = &lut_image_info; + + c.vkUpdateDescriptorSets(vk, 1, &write, 0, null); + } + } + + pub fn deinit(self: *PostProcessSystem, vk: c.VkDevice, descriptor_pool: c.VkDescriptorPool) void { + if (self.sampler != null) { + c.vkDestroySampler(vk, self.sampler, null); + self.sampler = null; + } + if (self.pipeline != null) { + c.vkDestroyPipeline(vk, self.pipeline, null); + self.pipeline = null; + } + if (self.pipeline_layout != null) { + c.vkDestroyPipelineLayout(vk, self.pipeline_layout, null); + self.pipeline_layout = null; + } + + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| { + if (self.descriptor_sets[i] != null) { + _ = c.vkFreeDescriptorSets(vk, descriptor_pool, 1, &self.descriptor_sets[i]); + self.descriptor_sets[i] = null; + } + } + + if (self.descriptor_set_layout != null) { + c.vkDestroyDescriptorSetLayout(vk, self.descriptor_set_layout, null); + self.descriptor_set_layout = null; + } + + self.pass_active = false; + } +}; diff --git a/src/engine/graphics/vulkan/render_pass_manager.zig b/src/engine/graphics/vulkan/render_pass_manager.zig new file mode 100644 index 00000000..def0911f --- /dev/null +++ b/src/engine/graphics/vulkan/render_pass_manager.zig @@ -0,0 +1,584 @@ +//! Render Pass Manager - Handles all Vulkan render pass and framebuffer management +//! +//! Extracted from rhi_vulkan.zig to eliminate the god object anti-pattern. +//! This module is responsible for: +//! - Creating and destroying render passes +//! 
//! - Managing framebuffers for different rendering stages
//! - Handling HDR, G-Pass, post-process, and UI render passes

const std = @import("std");
const c = @import("../../../c.zig").c;
const rhi = @import("../rhi.zig");
const VulkanDevice = @import("../vulkan_device.zig").VulkanDevice;
const Utils = @import("utils.zig");

/// Depth format used throughout the renderer
const DEPTH_FORMAT = c.VK_FORMAT_D32_SFLOAT;

/// Render pass manager handles all render pass and framebuffer resources.
/// Owns four render passes (HDR main, G-Pass, post-process, UI overlay) and
/// their framebuffers. All create* functions destroy any existing object of
/// the same kind first, so they are safe to call again on swapchain resize.
pub const RenderPassManager = struct {
    allocator: ?std.mem.Allocator = null,

    // Main render pass (HDR with optional MSAA)
    hdr_render_pass: c.VkRenderPass = null,

    // G-Pass render pass (for SSAO prep)
    g_render_pass: c.VkRenderPass = null,

    // Post-process render pass
    post_process_render_pass: c.VkRenderPass = null,

    // UI render pass (for swapchain overlay)
    ui_swapchain_render_pass: c.VkRenderPass = null,

    // Framebuffers
    main_framebuffer: c.VkFramebuffer = null,
    g_framebuffer: c.VkFramebuffer = null,
    post_process_framebuffers: std.ArrayListUnmanaged(c.VkFramebuffer) = .empty,
    ui_swapchain_framebuffers: std.ArrayListUnmanaged(c.VkFramebuffer) = .empty,

    /// Initialize the render pass manager. The allocator is retained for
    /// framebuffer-list bookkeeping until `deinit`.
    pub fn init(allocator: std.mem.Allocator) RenderPassManager {
        return .{
            .allocator = allocator,
            .post_process_framebuffers = .empty,
            .ui_swapchain_framebuffers = .empty,
        };
    }

    /// Deinitialize and destroy all render passes and framebuffers.
    /// Safe to call when partially initialized: framebuffers are only freed
    /// if an allocator was stored, and every destroy checks for null handles.
    pub fn deinit(self: *RenderPassManager, vk_device: c.VkDevice) void {
        if (self.allocator) |allocator| {
            self.destroyFramebuffers(vk_device, allocator);
        }
        self.destroyRenderPasses(vk_device);
        self.allocator = null;
    }

    /// Destroy all framebuffers (main, G-Pass, and the per-swapchain-image
    /// post-process and UI lists). Handles are nulled so a double call is a no-op.
    pub fn destroyFramebuffers(self: *RenderPassManager, vk_device: c.VkDevice, allocator: std.mem.Allocator) void {
        if (self.main_framebuffer) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
            self.main_framebuffer = null;
        }

        if (self.g_framebuffer) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
            self.g_framebuffer = null;
        }

        for (self.post_process_framebuffers.items) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
        }
        self.post_process_framebuffers.deinit(allocator);
        self.post_process_framebuffers = .empty;

        for (self.ui_swapchain_framebuffers.items) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
        }
        self.ui_swapchain_framebuffers.deinit(allocator);
        self.ui_swapchain_framebuffers = .empty;
    }

    /// Destroy all render passes. Handles are nulled so a double call is a no-op.
    fn destroyRenderPasses(self: *RenderPassManager, vk_device: c.VkDevice) void {
        if (self.hdr_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.hdr_render_pass = null;
        }

        if (self.g_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.g_render_pass = null;
        }

        if (self.post_process_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.post_process_render_pass = null;
        }

        if (self.ui_swapchain_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.ui_swapchain_render_pass = null;
        }
    }

    /// Create the main HDR render pass (with optional MSAA).
    /// With MSAA: 3 attachments (MSAA color, MSAA depth, single-sample resolve).
    /// Without:   2 attachments (color, depth). The color result ends in
    /// SHADER_READ_ONLY_OPTIMAL so later passes can sample it.
    pub fn createMainRenderPass(
        self: *RenderPassManager,
        vk_device: c.VkDevice,
        _extent: c.VkExtent2D,
        msaa_samples: u8,
    ) !void {
        _ = _extent; // kept for signature compatibility; extent is only needed at framebuffer creation
        // Destroy existing render pass
        if (self.hdr_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.hdr_render_pass = null;
        }

        const sample_count = getMSAASampleCountFlag(msaa_samples);
        const use_msaa = msaa_samples > 1;
        const hdr_format = c.VK_FORMAT_R16G16B16A16_SFLOAT;

        if (use_msaa) {
            // MSAA render pass: 3 attachments (MSAA color, MSAA depth, resolve)
            var msaa_color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
            msaa_color_attachment.format = hdr_format;
            msaa_color_attachment.samples = sample_count;
            msaa_color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
            // MSAA color is resolved, never sampled directly, so it need not be stored.
            msaa_color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            msaa_color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            msaa_color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            msaa_color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
            msaa_color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

            var depth_attachment = std.mem.zeroes(c.VkAttachmentDescription);
            depth_attachment.format = DEPTH_FORMAT;
            depth_attachment.samples = sample_count;
            depth_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
            depth_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            depth_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            depth_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            depth_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
            depth_attachment.finalLayout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

            var resolve_attachment = std.mem.zeroes(c.VkAttachmentDescription);
            resolve_attachment.format = hdr_format;
            resolve_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
            resolve_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            resolve_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
            resolve_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            resolve_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            resolve_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
            resolve_attachment.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
            var depth_ref = c.VkAttachmentReference{ .attachment = 1, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };
            var resolve_ref = c.VkAttachmentReference{ .attachment = 2, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

            var subpass = std.mem.zeroes(c.VkSubpassDescription);
            subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
            subpass.colorAttachmentCount = 1;
            subpass.pColorAttachments = &color_ref;
            subpass.pDepthStencilAttachment = &depth_ref;
            subpass.pResolveAttachments = &resolve_ref;

            // Dependency 0: wait for prior reads before writing color.
            // Dependency 1: make color writes visible to later fragment-shader sampling.
            var dependencies = [_]c.VkSubpassDependency{
                .{
                    .srcSubpass = c.VK_SUBPASS_EXTERNAL,
                    .dstSubpass = 0,
                    .srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                    .dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    .srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT,
                    .dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
                },
                .{
                    .srcSubpass = 0,
                    .dstSubpass = c.VK_SUBPASS_EXTERNAL,
                    .srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                    .srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT,
                    .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
                },
            };

            var attachment_descs = [_]c.VkAttachmentDescription{ msaa_color_attachment, depth_attachment, resolve_attachment };
            var render_pass_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
            render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
            render_pass_info.attachmentCount = 3;
            render_pass_info.pAttachments = &attachment_descs[0];
            render_pass_info.subpassCount = 1;
            render_pass_info.pSubpasses = &subpass;
            render_pass_info.dependencyCount = 2;
            render_pass_info.pDependencies = &dependencies[0];

            try Utils.checkVk(c.vkCreateRenderPass(vk_device, &render_pass_info, null, &self.hdr_render_pass));
            std.log.info("Created HDR MSAA {}x render pass", .{msaa_samples});
        } else {
            // Non-MSAA render pass: 2 attachments (color, depth)
            var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
            color_attachment.format = hdr_format;
            color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
            color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
            color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
            color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
            color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            var depth_attachment = std.mem.zeroes(c.VkAttachmentDescription);
            depth_attachment.format = DEPTH_FORMAT;
            depth_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
            depth_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
            depth_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
            depth_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            depth_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
            depth_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
            depth_attachment.finalLayout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

            var color_attachment_ref = std.mem.zeroes(c.VkAttachmentReference);
            color_attachment_ref.attachment = 0;
            color_attachment_ref.layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

            var depth_attachment_ref = std.mem.zeroes(c.VkAttachmentReference);
            depth_attachment_ref.attachment = 1;
            depth_attachment_ref.layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

            var subpass = std.mem.zeroes(c.VkSubpassDescription);
            subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
            subpass.colorAttachmentCount = 1;
            subpass.pColorAttachments = &color_attachment_ref;
            subpass.pDepthStencilAttachment = &depth_attachment_ref;

            var dependencies = [_]c.VkSubpassDependency{
                .{
                    .srcSubpass = c.VK_SUBPASS_EXTERNAL,
                    .dstSubpass = 0,
                    .srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                    .dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    .srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT,
                    .dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
                },
                .{
                    .srcSubpass = 0,
                    .dstSubpass = c.VK_SUBPASS_EXTERNAL,
                    .srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                    .srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT,
                    .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT,
                },
            };

            var attachments = [_]c.VkAttachmentDescription{ color_attachment, depth_attachment };
            var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
            rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
            rp_info.attachmentCount = 2;
            rp_info.pAttachments = &attachments[0];
            rp_info.subpassCount = 1;
            rp_info.pSubpasses = &subpass;
            rp_info.dependencyCount = 2;
            rp_info.pDependencies = &dependencies[0];

            try Utils.checkVk(c.vkCreateRenderPass(vk_device, &rp_info, null, &self.hdr_render_pass));
        }
    }

    /// Create the G-Pass render pass (for SSAO prep).
    /// Attachments: 0 = normals (RGBA8), 1 = motion vectors (RG16F), 2 = depth.
    /// All are stored and transitioned to SHADER_READ_ONLY_OPTIMAL for later sampling.
    pub fn createGPassRenderPass(self: *RenderPassManager, vk_device: c.VkDevice) !void {
        if (self.g_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.g_render_pass = null;
        }

        const normal_format = c.VK_FORMAT_R8G8B8A8_UNORM;
        const velocity_format = c.VK_FORMAT_R16G16_SFLOAT;

        var attachments: [3]c.VkAttachmentDescription = undefined;

        // Attachment 0: Normal buffer (color output)
        attachments[0] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[0].format = normal_format;
        attachments[0].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[0].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[0].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[0].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[0].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[0].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[0].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // Attachment 1: Velocity buffer (color output for motion vectors)
        attachments[1] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[1].format = velocity_format;
        attachments[1].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[1].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[1].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[1].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[1].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[1].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[1].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // Attachment 2: Depth buffer
        attachments[2] = std.mem.zeroes(c.VkAttachmentDescription);
        attachments[2].format = DEPTH_FORMAT;
        attachments[2].samples = c.VK_SAMPLE_COUNT_1_BIT;
        attachments[2].loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[2].storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        attachments[2].stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[2].stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[2].initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[2].finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        var color_refs = [_]c.VkAttachmentReference{
            c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
            c.VkAttachmentReference{ .attachment = 1, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
        };
        var depth_ref = c.VkAttachmentReference{ .attachment = 2, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 2;
        subpass.pColorAttachments = &color_refs;
        subpass.pDepthStencilAttachment = &depth_ref;

        var deps: [2]c.VkSubpassDependency = undefined;
        deps[0] = std.mem.zeroes(c.VkSubpassDependency);
        deps[0].srcSubpass = c.VK_SUBPASS_EXTERNAL;
        deps[0].dstSubpass = 0;
        deps[0].srcStageMask = c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
        deps[0].dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
        deps[0].srcAccessMask = c.VK_ACCESS_MEMORY_READ_BIT;
        deps[0].dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        deps[0].dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

        deps[1] = std.mem.zeroes(c.VkSubpassDependency);
        deps[1].srcSubpass = 0;
        deps[1].dstSubpass = c.VK_SUBPASS_EXTERNAL;
        deps[1].srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | c.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
        deps[1].dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        deps[1].srcAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        deps[1].dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT;
        deps[1].dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

        var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        rp_info.attachmentCount = 3;
        rp_info.pAttachments = &attachments;
        rp_info.subpassCount = 1;
        rp_info.pSubpasses = &subpass;
        rp_info.dependencyCount = 2;
        rp_info.pDependencies = &deps;

        try Utils.checkVk(c.vkCreateRenderPass(vk_device, &rp_info, null, &self.g_render_pass));
    }

    /// Create post-process render pass. Single swapchain-format color attachment
    /// that ends in PRESENT_SRC_KHR (final pass before presentation).
    pub fn createPostProcessRenderPass(self: *RenderPassManager, vk_device: c.VkDevice, swapchain_format: c.VkFormat) !void {
        if (self.post_process_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.post_process_render_pass = null;
        }

        var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        color_attachment.format = swapchain_format;
        color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
        color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
        color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

        var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 1;
        subpass.pColorAttachments = &color_ref;

        var dependency = std.mem.zeroes(c.VkSubpassDependency);
        dependency.srcSubpass = c.VK_SUBPASS_EXTERNAL;
        dependency.dstSubpass = 0;
        dependency.srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dependency.srcAccessMask = 0;
        dependency.dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dependency.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

        var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        rp_info.attachmentCount = 1;
        rp_info.pAttachments = &color_attachment;
        rp_info.subpassCount = 1;
        rp_info.pSubpasses = &subpass;
        rp_info.dependencyCount = 1;
        rp_info.pDependencies = &dependency;

        try Utils.checkVk(c.vkCreateRenderPass(vk_device, &rp_info, null, &self.post_process_render_pass));
    }

    /// Create UI swapchain render pass: loads the already-presented-layout color
    /// (LOAD_OP_LOAD) so the UI is composited over the post-process output.
    pub fn createUISwapchainRenderPass(self: *RenderPassManager, vk_device: c.VkDevice, swapchain_format: c.VkFormat) !void {
        if (self.ui_swapchain_render_pass) |rp| {
            c.vkDestroyRenderPass(vk_device, rp, null);
            self.ui_swapchain_render_pass = null;
        }

        var color_attachment = std.mem.zeroes(c.VkAttachmentDescription);
        color_attachment.format = swapchain_format;
        color_attachment.samples = c.VK_SAMPLE_COUNT_1_BIT;
        color_attachment.loadOp = c.VK_ATTACHMENT_LOAD_OP_LOAD;
        color_attachment.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
        color_attachment.stencilLoadOp = c.VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        color_attachment.stencilStoreOp = c.VK_ATTACHMENT_STORE_OP_DONT_CARE;
        color_attachment.initialLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        color_attachment.finalLayout = c.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

        var color_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };

        var subpass = std.mem.zeroes(c.VkSubpassDescription);
        subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = 1;
        subpass.pColorAttachments = &color_ref;

        var dependency = std.mem.zeroes(c.VkSubpassDependency);
        dependency.srcSubpass = c.VK_SUBPASS_EXTERNAL;
        dependency.dstSubpass = 0;
        dependency.srcStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dependency.srcAccessMask = 0;
        dependency.dstStageMask = c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dependency.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | c.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
        dependency.dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT;

        var rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
        rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
        rp_info.attachmentCount = 1;
        rp_info.pAttachments = &color_attachment;
        rp_info.subpassCount = 1;
        rp_info.pSubpasses = &subpass;
        rp_info.dependencyCount = 1;
        rp_info.pDependencies = &dependency;

        try Utils.checkVk(c.vkCreateRenderPass(vk_device, &rp_info, null, &self.ui_swapchain_render_pass));
    }

    /// Create main framebuffer for the HDR pass.
    /// `hdr_msaa_view` must be non-null when `msaa_samples > 1`; otherwise the
    /// non-MSAA layout is used.
    pub fn createMainFramebuffer(
        self: *RenderPassManager,
        vk_device: c.VkDevice,
        extent: c.VkExtent2D,
        hdr_view: c.VkImageView,
        hdr_msaa_view: ?c.VkImageView,
        depth_view: c.VkImageView,
        msaa_samples: u8,
    ) !void {
        if (self.main_framebuffer) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
            self.main_framebuffer = null;
        }

        // BUG FIX: the attachment array must outlive the branch that fills it.
        // Previously each `if`/`else` branch declared its own local `attachments`
        // array and stored `&attachments[0]` into `fb_info.pAttachments`; that
        // pointer referred to block-scoped stack memory whose lifetime ended at
        // the branch's closing brace, so vkCreateFramebuffer read a dangling
        // pointer. Hoisting the storage to function scope fixes this.
        var attachment_storage: [3]c.VkImageView = undefined;
        var attachment_count: u32 = undefined;

        const use_msaa = msaa_samples > 1;
        if (use_msaa and hdr_msaa_view != null) {
            // MSAA: [MSAA Color, MSAA Depth, Resolve HDR]
            attachment_storage = .{ hdr_msaa_view.?, depth_view, hdr_view };
            attachment_count = 3;
        } else {
            // Non-MSAA: [HDR Color, Depth]
            attachment_storage[0] = hdr_view;
            attachment_storage[1] = depth_view;
            attachment_count = 2;
        }

        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = self.hdr_render_pass;
        fb_info.attachmentCount = attachment_count;
        fb_info.pAttachments = &attachment_storage[0];
        fb_info.width = extent.width;
        fb_info.height = extent.height;
        fb_info.layers = 1;

        try Utils.checkVk(c.vkCreateFramebuffer(vk_device, &fb_info, null, &self.main_framebuffer));
    }

    /// Create G-Pass framebuffer (attachment order must match createGPassRenderPass:
    /// normals, velocity, depth).
    pub fn createGPassFramebuffer(
        self: *RenderPassManager,
        vk_device: c.VkDevice,
        extent: c.VkExtent2D,
        normal_view: c.VkImageView,
        velocity_view: c.VkImageView,
        depth_view: c.VkImageView,
    ) !void {
        if (self.g_framebuffer) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
            self.g_framebuffer = null;
        }

        const attachments = [_]c.VkImageView{ normal_view, velocity_view, depth_view };

        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = self.g_render_pass;
        fb_info.attachmentCount = 3;
        fb_info.pAttachments = &attachments;
        fb_info.width = extent.width;
        fb_info.height = extent.height;
        fb_info.layers = 1;

        try Utils.checkVk(c.vkCreateFramebuffer(vk_device, &fb_info, null, &self.g_framebuffer));
    }

    /// Create post-process framebuffers (one per swapchain image).
    /// Existing framebuffers are destroyed; the list capacity is reused.
    pub fn createPostProcessFramebuffers(
        self: *RenderPassManager,
        vk_device: c.VkDevice,
        allocator: std.mem.Allocator,
        extent: c.VkExtent2D,
        swapchain_image_views: []const c.VkImageView,
    ) !void {
        // Clear existing
        for (self.post_process_framebuffers.items) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
        }
        self.post_process_framebuffers.clearRetainingCapacity();

        for (swapchain_image_views) |view| {
            var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
            fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
            fb_info.renderPass = self.post_process_render_pass;
            fb_info.attachmentCount = 1;
            fb_info.pAttachments = &view; // valid: used within this iteration only
            fb_info.width = extent.width;
            fb_info.height = extent.height;
            fb_info.layers = 1;

            var fb: c.VkFramebuffer = null;
            try Utils.checkVk(c.vkCreateFramebuffer(vk_device, &fb_info, null, &fb));
            try self.post_process_framebuffers.append(allocator, fb);
        }
    }

    /// Create UI swapchain framebuffers (one per swapchain image).
    /// Existing framebuffers are destroyed; the list capacity is reused.
    pub fn createUISwapchainFramebuffers(
        self: *RenderPassManager,
        vk_device: c.VkDevice,
        allocator: std.mem.Allocator,
        extent: c.VkExtent2D,
        swapchain_image_views: []const c.VkImageView,
    ) !void {
        // Clear existing
        for (self.ui_swapchain_framebuffers.items) |fb| {
            c.vkDestroyFramebuffer(vk_device, fb, null);
        }
        self.ui_swapchain_framebuffers.clearRetainingCapacity();

        for (swapchain_image_views) |view| {
            var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
            fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
            fb_info.renderPass = self.ui_swapchain_render_pass;
            fb_info.attachmentCount = 1;
            fb_info.pAttachments = &view; // valid: used within this iteration only
            fb_info.width = extent.width;
            fb_info.height = extent.height;
            fb_info.layers = 1;

            var fb: c.VkFramebuffer = null;
            try Utils.checkVk(c.vkCreateFramebuffer(vk_device, &fb_info, null, &fb));
            try self.ui_swapchain_framebuffers.append(allocator, fb);
        }
    }
};

/// Converts an MSAA sample count to the corresponding Vulkan sample count flag.
fn getMSAASampleCountFlag(samples: u8) c.VkSampleCountFlagBits {
    return switch (samples) {
        2 => c.VK_SAMPLE_COUNT_2_BIT,
        4 => c.VK_SAMPLE_COUNT_4_BIT,
        8 => c.VK_SAMPLE_COUNT_8_BIT,
        // Generalized to the remaining counts Vulkan defines; behavior for the
        // previously supported inputs (1, 2, 4, 8) is unchanged.
        16 => c.VK_SAMPLE_COUNT_16_BIT,
        32 => c.VK_SAMPLE_COUNT_32_BIT,
        64 => c.VK_SAMPLE_COUNT_64_BIT,
        // Invalid/unsupported values (0, 3, 5, ...) fall back to no MSAA.
        else => c.VK_SAMPLE_COUNT_1_BIT,
    };
}
- fn allocate(self: *StagingBuffer, size: u64) ?u64 { + pub fn allocate(self: *StagingBuffer, size: u64) ?u64 { const alignment = 256; // Safe alignment for most GPU copy operations const aligned_offset = std.mem.alignForward(u64, self.current_offset, alignment); @@ -267,7 +270,7 @@ pub const ResourceManager = struct { self.transfer_ready = false; } - fn prepareTransfer(self: *ResourceManager) !c.VkCommandBuffer { + pub fn prepareTransfer(self: *ResourceManager) !c.VkCommandBuffer { if (self.transfer_ready) return self.transfer_command_buffers[self.current_frame_index]; const cb = self.transfer_command_buffers[self.current_frame_index]; @@ -369,233 +372,11 @@ pub const ResourceManager = struct { } pub fn createTexture(self: *ResourceManager, width: u32, height: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) rhi.RhiError!rhi.TextureHandle { - const vk_format: c.VkFormat = switch (format) { - .rgba => c.VK_FORMAT_R8G8B8A8_UNORM, - .rgba_srgb => c.VK_FORMAT_R8G8B8A8_SRGB, - .rgb => c.VK_FORMAT_R8G8B8_UNORM, - .red => c.VK_FORMAT_R8_UNORM, - .depth => c.VK_FORMAT_D32_SFLOAT, - .rgba32f => c.VK_FORMAT_R32G32B32A32_SFLOAT, - }; - - const mip_levels: u32 = if (config.generate_mipmaps and format != .depth) - @as(u32, @intFromFloat(@floor(std.math.log2(@as(f32, @floatFromInt(@max(width, height))))))) + 1 - else - 1; - - const aspect_mask: c.VkImageAspectFlags = if (format == .depth) - c.VK_IMAGE_ASPECT_DEPTH_BIT - else - c.VK_IMAGE_ASPECT_COLOR_BIT; - - var usage_flags: c.VkImageUsageFlags = if (format == .depth) - c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT - else - c.VK_IMAGE_USAGE_TRANSFER_DST_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT; - - if (mip_levels > 1) { - usage_flags |= c.VK_IMAGE_USAGE_TRANSFER_SRC_BIT; - } - - if (config.is_render_target) { - usage_flags |= c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; - } - - var staging_offset: u64 = 0; - var staging_ptr: ?*StagingBuffer = null; - if (data_opt) |data| { - 
const staging = &self.staging_buffers[self.current_frame_index]; - const offset = staging.allocate(data.len) orelse return error.OutOfMemory; - if (staging.mapped_ptr == null) return error.OutOfMemory; - staging_offset = offset; - staging_ptr = staging; - } - - const device = self.vulkan_device.vk_device; - - var image: c.VkImage = null; - var image_info = std.mem.zeroes(c.VkImageCreateInfo); - image_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - image_info.imageType = c.VK_IMAGE_TYPE_2D; - image_info.extent.width = width; - image_info.extent.height = height; - image_info.extent.depth = 1; - image_info.mipLevels = mip_levels; - image_info.arrayLayers = 1; - image_info.format = vk_format; - image_info.tiling = c.VK_IMAGE_TILING_OPTIMAL; - image_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; - image_info.usage = usage_flags; - image_info.samples = c.VK_SAMPLE_COUNT_1_BIT; - image_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE; - - try Utils.checkVk(c.vkCreateImage(device, &image_info, null, &image)); - errdefer c.vkDestroyImage(device, image, null); - - var mem_reqs: c.VkMemoryRequirements = undefined; - c.vkGetImageMemoryRequirements(device, image, &mem_reqs); - - var memory: c.VkDeviceMemory = null; - var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo); - alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; - alloc_info.allocationSize = mem_reqs.size; - alloc_info.memoryTypeIndex = try Utils.findMemoryType(self.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); - - try Utils.checkVk(c.vkAllocateMemory(device, &alloc_info, null, &memory)); - errdefer c.vkFreeMemory(device, memory, null); - - try Utils.checkVk(c.vkBindImageMemory(device, image, memory, 0)); - - // Upload data if present - if (data_opt) |data| { - const staging = staging_ptr orelse return error.OutOfMemory; - std.debug.assert(staging.mapped_ptr != null); - const dest = @as([*]u8, @ptrCast(staging.mapped_ptr.?)) + staging_offset; - 
@memcpy(dest[0..data.len], data); - - const transfer_cb = try self.prepareTransfer(); - - var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); - barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; - barrier.newLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.image = image; - barrier.subresourceRange.aspectMask = aspect_mask; - barrier.subresourceRange.baseMipLevel = 0; - barrier.subresourceRange.levelCount = mip_levels; - barrier.subresourceRange.baseArrayLayer = 0; - barrier.subresourceRange.layerCount = 1; - barrier.srcAccessMask = 0; - barrier.dstAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; - - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, null, 0, null, 1, &barrier); - - var region = std.mem.zeroes(c.VkBufferImageCopy); - region.bufferOffset = staging_offset; - region.imageSubresource.aspectMask = aspect_mask; - region.imageSubresource.layerCount = 1; - region.imageExtent = .{ .width = width, .height = height, .depth = 1 }; - - c.vkCmdCopyBufferToImage(transfer_cb, staging.buffer, image, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); - - if (mip_levels > 1) { - // Generate mipmaps (simplified blit loop) - var mip_width: i32 = @intCast(width); - var mip_height: i32 = @intCast(height); - - for (1..mip_levels) |i| { - barrier.subresourceRange.baseMipLevel = @intCast(i - 1); - barrier.subresourceRange.levelCount = 1; - barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; - barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; - barrier.dstAccessMask = c.VK_ACCESS_TRANSFER_READ_BIT; - - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, null, 0, null, 1, &barrier); - - var blit = 
std.mem.zeroes(c.VkImageBlit); - blit.srcOffsets[0] = .{ .x = 0, .y = 0, .z = 0 }; - blit.srcOffsets[1] = .{ .x = mip_width, .y = mip_height, .z = 1 }; - blit.srcSubresource.aspectMask = aspect_mask; - blit.srcSubresource.mipLevel = @intCast(i - 1); - blit.srcSubresource.baseArrayLayer = 0; - blit.srcSubresource.layerCount = 1; - - const next_width = if (mip_width > 1) @divFloor(mip_width, 2) else 1; - const next_height = if (mip_height > 1) @divFloor(mip_height, 2) else 1; - - blit.dstOffsets[0] = .{ .x = 0, .y = 0, .z = 0 }; - blit.dstOffsets[1] = .{ .x = next_width, .y = next_height, .z = 1 }; - blit.dstSubresource.aspectMask = aspect_mask; - blit.dstSubresource.mipLevel = @intCast(i); - blit.dstSubresource.baseArrayLayer = 0; - blit.dstSubresource.layerCount = 1; - - c.vkCmdBlitImage(transfer_cb, image, c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, c.VK_FILTER_LINEAR); - - barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_READ_BIT; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); - - if (mip_width > 1) mip_width = @divFloor(mip_width, 2); - if (mip_height > 1) mip_height = @divFloor(mip_height, 2); - } - - // Transition last mip level - barrier.subresourceRange.baseMipLevel = @intCast(mip_levels - 1); - barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); - } else { - barrier.oldLayout = 
c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); - } - } else { - // No data - transition to SHADER_READ_ONLY_OPTIMAL - const transfer_cb = try self.prepareTransfer(); - - var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); - barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; - barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; - barrier.image = image; - barrier.subresourceRange.aspectMask = aspect_mask; - barrier.subresourceRange.baseMipLevel = 0; - barrier.subresourceRange.levelCount = mip_levels; - barrier.subresourceRange.baseArrayLayer = 0; - barrier.subresourceRange.layerCount = 1; - barrier.srcAccessMask = 0; - barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; - - c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); - } - - var view: c.VkImageView = null; - var view_info = std.mem.zeroes(c.VkImageViewCreateInfo); - view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; - view_info.image = image; - view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D; - view_info.format = vk_format; - view_info.subresourceRange.aspectMask = aspect_mask; - view_info.subresourceRange.baseMipLevel = 0; - view_info.subresourceRange.levelCount = mip_levels; - view_info.subresourceRange.baseArrayLayer = 0; - view_info.subresourceRange.layerCount = 1; - - const sampler = try Utils.createSampler(self.vulkan_device, config, mip_levels, self.vulkan_device.max_anisotropy); - errdefer 
c.vkDestroySampler(device, sampler, null); - - try Utils.checkVk(c.vkCreateImageView(device, &view_info, null, &view)); - errdefer c.vkDestroyImageView(device, view, null); - - const handle = self.next_texture_handle; - self.next_texture_handle += 1; - try self.textures.put(handle, .{ - .image = image, - .memory = memory, - .view = view, - .sampler = sampler, - .width = width, - .height = height, - .format = format, - .config = config, - .is_owned = true, - }); + return resource_texture_ops.createTexture(self, width, height, format, config, data_opt); + } - return handle; + pub fn createTexture3D(self: *ResourceManager, width: u32, height: u32, depth: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) rhi.RhiError!rhi.TextureHandle { + return resource_texture_ops.createTexture3D(self, width, height, depth, format, config, data_opt); } pub fn destroyTexture(self: *ResourceManager, handle: rhi.TextureHandle) void { @@ -623,8 +404,10 @@ pub const ResourceManager = struct { .sampler = sampler, .width = width, .height = height, + .depth = 1, .format = format, .config = .{}, // Default config + .is_3d = false, .is_owned = false, }); @@ -644,8 +427,10 @@ pub const ResourceManager = struct { .sampler = sampler, .width = width, .height = height, + .depth = 1, .format = format, .config = .{}, + .is_3d = false, .is_owned = false, }); return handle; @@ -684,7 +469,7 @@ pub const ResourceManager = struct { region.bufferOffset = offset; region.imageSubresource.aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; - region.imageExtent = .{ .width = tex.width, .height = tex.height, .depth = 1 }; + region.imageExtent = .{ .width = tex.width, .height = tex.height, .depth = tex.depth }; c.vkCmdCopyBufferToImage(transfer_cb, staging.buffer, tex.image.?, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); diff --git a/src/engine/graphics/vulkan/resource_texture_ops.zig b/src/engine/graphics/vulkan/resource_texture_ops.zig new 
file mode 100644 index 00000000..04d3cf8a --- /dev/null +++ b/src/engine/graphics/vulkan/resource_texture_ops.zig @@ -0,0 +1,377 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Utils = @import("utils.zig"); + +pub fn createTexture(self: anytype, width: u32, height: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) rhi.RhiError!rhi.TextureHandle { + const vk_format: c.VkFormat = switch (format) { + .rgba => c.VK_FORMAT_R8G8B8A8_UNORM, + .rgba_srgb => c.VK_FORMAT_R8G8B8A8_SRGB, + .rgb => c.VK_FORMAT_R8G8B8_UNORM, + .red => c.VK_FORMAT_R8_UNORM, + .depth => c.VK_FORMAT_D32_SFLOAT, + .rgba32f => c.VK_FORMAT_R32G32B32A32_SFLOAT, + }; + + const mip_levels: u32 = if (config.generate_mipmaps and format != .depth) + @as(u32, @intFromFloat(@floor(std.math.log2(@as(f32, @floatFromInt(@max(width, height))))))) + 1 + else + 1; + + const aspect_mask: c.VkImageAspectFlags = if (format == .depth) + c.VK_IMAGE_ASPECT_DEPTH_BIT + else + c.VK_IMAGE_ASPECT_COLOR_BIT; + + var usage_flags: c.VkImageUsageFlags = if (format == .depth) + c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT + else + c.VK_IMAGE_USAGE_TRANSFER_DST_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT; + + if (mip_levels > 1) usage_flags |= c.VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + if (config.is_render_target) usage_flags |= c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + if (format == .rgba32f) usage_flags |= c.VK_IMAGE_USAGE_STORAGE_BIT; + + var staging_offset: u64 = 0; + if (data_opt) |data| { + const staging = &self.staging_buffers[self.current_frame_index]; + const offset = staging.allocate(data.len) orelse return error.OutOfMemory; + if (staging.mapped_ptr == null) return error.OutOfMemory; + staging_offset = offset; + } + + const device = self.vulkan_device.vk_device; + + var image: c.VkImage = null; + var image_info = std.mem.zeroes(c.VkImageCreateInfo); + image_info.sType = 
c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_info.imageType = c.VK_IMAGE_TYPE_2D; + image_info.extent.width = width; + image_info.extent.height = height; + image_info.extent.depth = 1; + image_info.mipLevels = mip_levels; + image_info.arrayLayers = 1; + image_info.format = vk_format; + image_info.tiling = c.VK_IMAGE_TILING_OPTIMAL; + image_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; + image_info.usage = usage_flags; + image_info.samples = c.VK_SAMPLE_COUNT_1_BIT; + image_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE; + + try Utils.checkVk(c.vkCreateImage(device, &image_info, null, &image)); + errdefer c.vkDestroyImage(device, image, null); + + var mem_reqs: c.VkMemoryRequirements = undefined; + c.vkGetImageMemoryRequirements(device, image, &mem_reqs); + + var memory: c.VkDeviceMemory = null; + var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo); + alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.allocationSize = mem_reqs.size; + alloc_info.memoryTypeIndex = try Utils.findMemoryType(self.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + try Utils.checkVk(c.vkAllocateMemory(device, &alloc_info, null, &memory)); + errdefer c.vkFreeMemory(device, memory, null); + + try Utils.checkVk(c.vkBindImageMemory(device, image, memory, 0)); + + if (data_opt) |data| { + const staging = &self.staging_buffers[self.current_frame_index]; + if (staging.mapped_ptr == null) return error.OutOfMemory; + const dest = @as([*]u8, @ptrCast(staging.mapped_ptr.?)) + staging_offset; + @memcpy(dest[0..data.len], data); + + const transfer_cb = try self.prepareTransfer(); + + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; + barrier.newLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = 
c.VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = aspect_mask; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = mip_levels; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = 1; + barrier.srcAccessMask = 0; + barrier.dstAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; + + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, null, 0, null, 1, &barrier); + + var region = std.mem.zeroes(c.VkBufferImageCopy); + region.bufferOffset = staging_offset; + region.imageSubresource.aspectMask = aspect_mask; + region.imageSubresource.layerCount = 1; + region.imageExtent = .{ .width = width, .height = height, .depth = 1 }; + + c.vkCmdCopyBufferToImage(transfer_cb, staging.buffer, image, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); + + if (mip_levels > 1) { + var mip_width: i32 = @intCast(width); + var mip_height: i32 = @intCast(height); + + for (1..mip_levels) |i| { + barrier.subresourceRange.baseMipLevel = @intCast(i - 1); + barrier.subresourceRange.levelCount = 1; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = c.VK_ACCESS_TRANSFER_READ_BIT; + + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, null, 0, null, 1, &barrier); + + var blit = std.mem.zeroes(c.VkImageBlit); + blit.srcOffsets[0] = .{ .x = 0, .y = 0, .z = 0 }; + blit.srcOffsets[1] = .{ .x = mip_width, .y = mip_height, .z = 1 }; + blit.srcSubresource.aspectMask = aspect_mask; + blit.srcSubresource.mipLevel = @intCast(i - 1); + blit.srcSubresource.baseArrayLayer = 0; + blit.srcSubresource.layerCount = 1; + + const next_width = if (mip_width > 1) @divFloor(mip_width, 2) else 1; + const next_height = if (mip_height > 1) 
@divFloor(mip_height, 2) else 1; + + blit.dstOffsets[0] = .{ .x = 0, .y = 0, .z = 0 }; + blit.dstOffsets[1] = .{ .x = next_width, .y = next_height, .z = 1 }; + blit.dstSubresource.aspectMask = aspect_mask; + blit.dstSubresource.mipLevel = @intCast(i); + blit.dstSubresource.baseArrayLayer = 0; + blit.dstSubresource.layerCount = 1; + + c.vkCmdBlitImage(transfer_cb, image, c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, c.VK_FILTER_LINEAR); + + barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_READ_BIT; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + + if (mip_width > 1) mip_width = @divFloor(mip_width, 2); + if (mip_height > 1) mip_height = @divFloor(mip_height, 2); + } + + barrier.subresourceRange.baseMipLevel = @intCast(mip_levels - 1); + barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + } else { + barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + } + } else { + const transfer_cb = try self.prepareTransfer(); + + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + 
barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = aspect_mask; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = mip_levels; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = 1; + barrier.srcAccessMask = 0; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + } + + var view: c.VkImageView = null; + var view_info = std.mem.zeroes(c.VkImageViewCreateInfo); + view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + view_info.image = image; + view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D; + view_info.format = vk_format; + view_info.subresourceRange.aspectMask = aspect_mask; + view_info.subresourceRange.baseMipLevel = 0; + view_info.subresourceRange.levelCount = mip_levels; + view_info.subresourceRange.baseArrayLayer = 0; + view_info.subresourceRange.layerCount = 1; + + const sampler = try Utils.createSampler(self.vulkan_device, config, mip_levels, self.vulkan_device.max_anisotropy); + errdefer c.vkDestroySampler(device, sampler, null); + + try Utils.checkVk(c.vkCreateImageView(device, &view_info, null, &view)); + errdefer c.vkDestroyImageView(device, view, null); + + const handle = self.next_texture_handle; + self.next_texture_handle += 1; + try self.textures.put(handle, .{ + .image = image, + .memory = memory, + .view = view, + .sampler = sampler, + .width = width, + .height = height, + .depth = 1, + .format = format, + .config = config, + .is_3d = false, + .is_owned = true, + }); + + return handle; +} + +/// Creates a 3D texture 
resource. +/// Note: `config.generate_mipmaps` is currently forced off for 3D textures. +/// Other config parameters (filtering, wrapping, render-target flag) are respected. +pub fn createTexture3D(self: anytype, width: u32, height: u32, depth: u32, format: rhi.TextureFormat, config: rhi.TextureConfig, data_opt: ?[]const u8) rhi.RhiError!rhi.TextureHandle { + var texture_config = config; + if (texture_config.generate_mipmaps) { + std.log.warn("3D texture mipmaps are not supported yet; disabling generate_mipmaps", .{}); + texture_config.generate_mipmaps = false; + } + + const vk_format: c.VkFormat = switch (format) { + .rgba => c.VK_FORMAT_R8G8B8A8_UNORM, + .rgba_srgb => c.VK_FORMAT_R8G8B8A8_SRGB, + .rgb => c.VK_FORMAT_R8G8B8_UNORM, + .red => c.VK_FORMAT_R8_UNORM, + .depth => c.VK_FORMAT_D32_SFLOAT, + .rgba32f => c.VK_FORMAT_R32G32B32A32_SFLOAT, + }; + + if (format == .depth) return error.FormatNotSupported; + if (depth == 0) return error.InvalidState; + + var usage_flags: c.VkImageUsageFlags = c.VK_IMAGE_USAGE_TRANSFER_DST_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT; + if (format == .rgba32f) usage_flags |= c.VK_IMAGE_USAGE_STORAGE_BIT; + if (texture_config.is_render_target) usage_flags |= c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + + var staging_offset: u64 = 0; + if (data_opt) |data| { + const staging = &self.staging_buffers[self.current_frame_index]; + const offset = staging.allocate(data.len) orelse return error.OutOfMemory; + if (staging.mapped_ptr == null) return error.OutOfMemory; + staging_offset = offset; + } + + const device = self.vulkan_device.vk_device; + + var image: c.VkImage = null; + var image_info = std.mem.zeroes(c.VkImageCreateInfo); + image_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_info.imageType = c.VK_IMAGE_TYPE_3D; + image_info.extent.width = width; + image_info.extent.height = height; + image_info.extent.depth = depth; + image_info.mipLevels = 1; + image_info.arrayLayers = 1; + image_info.format = vk_format; + image_info.tiling = 
c.VK_IMAGE_TILING_OPTIMAL; + image_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; + image_info.usage = usage_flags; + image_info.samples = c.VK_SAMPLE_COUNT_1_BIT; + image_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE; + + try Utils.checkVk(c.vkCreateImage(device, &image_info, null, &image)); + errdefer c.vkDestroyImage(device, image, null); + + var mem_reqs: c.VkMemoryRequirements = undefined; + c.vkGetImageMemoryRequirements(device, image, &mem_reqs); + + var memory: c.VkDeviceMemory = null; + var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo); + alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.allocationSize = mem_reqs.size; + alloc_info.memoryTypeIndex = try Utils.findMemoryType(self.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + try Utils.checkVk(c.vkAllocateMemory(device, &alloc_info, null, &memory)); + errdefer c.vkFreeMemory(device, memory, null); + try Utils.checkVk(c.vkBindImageMemory(device, image, memory, 0)); + + const transfer_cb = try self.prepareTransfer(); + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED; + barrier.newLayout = if (data_opt != null) c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL else c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = 1; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = 1; + barrier.srcAccessMask = 0; + barrier.dstAccessMask = if (data_opt != null) c.VK_ACCESS_TRANSFER_WRITE_BIT else c.VK_ACCESS_SHADER_READ_BIT; + + c.vkCmdPipelineBarrier( + transfer_cb, + c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + if 
(data_opt != null) c.VK_PIPELINE_STAGE_TRANSFER_BIT else c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + 0, + 0, + null, + 0, + null, + 1, + &barrier, + ); + + if (data_opt) |data| { + const staging = &self.staging_buffers[self.current_frame_index]; + if (staging.mapped_ptr == null) return error.OutOfMemory; + const dest = @as([*]u8, @ptrCast(staging.mapped_ptr.?)) + staging_offset; + @memcpy(dest[0..data.len], data); + + var region = std.mem.zeroes(c.VkBufferImageCopy); + region.bufferOffset = staging_offset; + region.imageSubresource.aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT; + region.imageSubresource.layerCount = 1; + region.imageExtent = .{ .width = width, .height = height, .depth = depth }; + c.vkCmdCopyBufferToImage(transfer_cb, staging.buffer, image, c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); + + barrier.oldLayout = c.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = c.VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + c.vkCmdPipelineBarrier(transfer_cb, c.VK_PIPELINE_STAGE_TRANSFER_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, 1, &barrier); + } + + var view: c.VkImageView = null; + var view_info = std.mem.zeroes(c.VkImageViewCreateInfo); + view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + view_info.image = image; + view_info.viewType = c.VK_IMAGE_VIEW_TYPE_3D; + view_info.format = vk_format; + view_info.subresourceRange.aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT; + view_info.subresourceRange.baseMipLevel = 0; + view_info.subresourceRange.levelCount = 1; + view_info.subresourceRange.baseArrayLayer = 0; + view_info.subresourceRange.layerCount = 1; + + const sampler = try Utils.createSampler(self.vulkan_device, texture_config, 1, self.vulkan_device.max_anisotropy); + errdefer c.vkDestroySampler(device, sampler, null); + + try Utils.checkVk(c.vkCreateImageView(device, &view_info, null, &view)); + errdefer 
c.vkDestroyImageView(device, view, null); + + const handle = self.next_texture_handle; + self.next_texture_handle += 1; + try self.textures.put(handle, .{ + .image = image, + .memory = memory, + .view = view, + .sampler = sampler, + .width = width, + .height = height, + .depth = depth, + .format = format, + .config = texture_config, + .is_3d = true, + .is_owned = true, + }); + + return handle; +} diff --git a/src/engine/graphics/vulkan/rhi_context_factory.zig b/src/engine/graphics/vulkan/rhi_context_factory.zig new file mode 100644 index 00000000..ca338514 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_context_factory.zig @@ -0,0 +1,205 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const RenderDevice = @import("../render_device.zig").RenderDevice; +const Mat4 = @import("../../math/mat4.zig").Mat4; +const build_options = @import("build_options"); +const resource_manager_pkg = @import("resource_manager.zig"); +const VulkanBuffer = resource_manager_pkg.VulkanBuffer; +const TextureResource = resource_manager_pkg.TextureResource; +const ShadowSystem = @import("../shadow_system.zig").ShadowSystem; + +const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT; + +pub fn createRHI( + comptime VulkanContext: type, + allocator: std.mem.Allocator, + window: *c.SDL_Window, + render_device: ?*RenderDevice, + shadow_resolution: u32, + msaa_samples: u8, + anisotropic_filtering: u8, + vtable: *const rhi.RHI.VTable, +) !rhi.RHI { + const ctx = try allocator.create(VulkanContext); + errdefer allocator.destroy(ctx); + @memset(std.mem.asBytes(ctx), 0); + + ctx.allocator = allocator; + ctx.render_device = render_device; + ctx.shadow_runtime.shadow_resolution = shadow_resolution; + ctx.window = window; + ctx.shadow_system = try ShadowSystem.init(allocator, shadow_resolution); + ctx.vulkan_device = .{ + .allocator = allocator, + }; + ctx.swapchain.swapchain = .{ + .device = &ctx.vulkan_device, + .window = window, + .allocator 
= allocator, + }; + ctx.runtime.framebuffer_resized = false; + + ctx.runtime.draw_call_count = 0; + ctx.resources.buffers = std.AutoHashMap(rhi.BufferHandle, VulkanBuffer).init(allocator); + ctx.resources.next_buffer_handle = 1; + ctx.resources.textures = std.AutoHashMap(rhi.TextureHandle, TextureResource).init(allocator); + ctx.resources.next_texture_handle = 1; + ctx.draw.current_texture = 0; + ctx.draw.current_normal_texture = 0; + ctx.draw.current_roughness_texture = 0; + ctx.draw.current_displacement_texture = 0; + ctx.draw.current_env_texture = 0; + ctx.draw.current_lpv_texture = 0; + ctx.draw.current_lpv_texture_g = 0; + ctx.draw.current_lpv_texture_b = 0; + ctx.draw.dummy_texture = 0; + ctx.draw.dummy_texture_3d = 0; + ctx.draw.dummy_normal_texture = 0; + ctx.draw.dummy_roughness_texture = 0; + ctx.mutex = .{}; + ctx.swapchain.swapchain.images = .empty; + ctx.swapchain.swapchain.image_views = .empty; + ctx.swapchain.swapchain.framebuffers = .empty; + ctx.runtime.clear_color = .{ 0.07, 0.08, 0.1, 1.0 }; + ctx.frames.frame_in_progress = false; + ctx.runtime.main_pass_active = false; + ctx.shadow_system.pass_active = false; + ctx.shadow_system.pass_index = 0; + ctx.ui.ui_in_progress = false; + ctx.ui.ui_mapped_ptr = null; + ctx.ui.ui_vertex_offset = 0; + ctx.runtime.frame_index = 0; + ctx.timing.timing_enabled = false; + ctx.timing.timing_results = std.mem.zeroes(rhi.GpuTimingResults); + ctx.frames.current_frame = 0; + ctx.frames.current_image_index = 0; + + ctx.draw.terrain_pipeline_bound = false; + ctx.shadow_system.pipeline_bound = false; + ctx.draw.descriptors_updated = false; + ctx.draw.bound_texture = 0; + ctx.draw.bound_normal_texture = 0; + ctx.draw.bound_roughness_texture = 0; + ctx.draw.bound_displacement_texture = 0; + ctx.draw.bound_env_texture = 0; + ctx.draw.bound_lpv_texture = 0; + ctx.draw.current_mask_radius = 0; + ctx.draw.lod_mode = false; + ctx.draw.pending_instance_buffer = 0; + ctx.draw.pending_lod_instance_buffer = 0; + + 
ctx.options.wireframe_enabled = false; + ctx.options.textures_enabled = true; + ctx.options.vsync_enabled = true; + ctx.options.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; + + const safe_mode_env = std.posix.getenv("ZIGCRAFT_SAFE_MODE"); + ctx.options.safe_mode = if (safe_mode_env) |val| + !(std.mem.eql(u8, val, "0") or std.mem.eql(u8, val, "false")) + else + false; + if (ctx.options.safe_mode) { + std.log.warn("ZIGCRAFT_SAFE_MODE enabled: throttling uploads and forcing GPU idle each frame", .{}); + } + + ctx.frames.command_pool = null; + ctx.resources.transfer_command_pool = null; + ctx.resources.transfer_ready = false; + ctx.swapchain.swapchain.main_render_pass = null; + ctx.swapchain.swapchain.handle = null; + ctx.swapchain.swapchain.depth_image = null; + ctx.swapchain.swapchain.depth_image_view = null; + ctx.swapchain.swapchain.depth_image_memory = null; + ctx.swapchain.swapchain.msaa_color_image = null; + ctx.swapchain.swapchain.msaa_color_view = null; + ctx.swapchain.swapchain.msaa_color_memory = null; + ctx.pipeline_manager.terrain_pipeline = null; + ctx.pipeline_manager.pipeline_layout = null; + ctx.pipeline_manager.wireframe_pipeline = null; + ctx.pipeline_manager.sky_pipeline = null; + ctx.pipeline_manager.sky_pipeline_layout = null; + ctx.pipeline_manager.ui_pipeline = null; + ctx.pipeline_manager.ui_pipeline_layout = null; + ctx.pipeline_manager.ui_tex_pipeline = null; + ctx.pipeline_manager.ui_tex_pipeline_layout = null; + ctx.pipeline_manager.ui_swapchain_pipeline = null; + ctx.pipeline_manager.ui_swapchain_tex_pipeline = null; + ctx.render_pass_manager.ui_swapchain_framebuffers = .empty; + if (comptime build_options.debug_shadows) { + ctx.debug_shadow.pipeline = null; + ctx.debug_shadow.pipeline_layout = null; + ctx.debug_shadow.descriptor_set_layout = null; + ctx.debug_shadow.vbo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.debug_shadow.descriptor_next = .{ 0, 0 }; + } + ctx.pipeline_manager.cloud_pipeline = null; 
+ ctx.pipeline_manager.cloud_pipeline_layout = null; + ctx.cloud.cloud_vbo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.cloud.cloud_ebo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.cloud.cloud_mesh_size = 10000.0; + ctx.post_process = .{}; + ctx.descriptors.descriptor_pool = null; + ctx.descriptors.descriptor_set_layout = null; + ctx.runtime.memory_type_index = 0; + ctx.options.anisotropic_filtering = anisotropic_filtering; + ctx.options.msaa_samples = msaa_samples; + + ctx.shadow_system.shadow_image = null; + ctx.shadow_system.shadow_image_view = null; + ctx.shadow_system.shadow_image_memory = null; + ctx.shadow_system.shadow_sampler = null; + ctx.shadow_system.shadow_render_pass = null; + ctx.shadow_system.shadow_pipeline = null; + for (0..rhi.SHADOW_CASCADE_COUNT) |i| { + ctx.shadow_system.shadow_image_views[i] = null; + ctx.shadow_system.shadow_framebuffers[i] = null; + ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_UNDEFINED; + } + + for (0..MAX_FRAMES_IN_FLIGHT) |i| { + ctx.frames.image_available_semaphores[i] = null; + ctx.frames.render_finished_semaphores[i] = null; + ctx.frames.in_flight_fences[i] = null; + ctx.descriptors.global_ubos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.descriptors.shadow_ubos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.descriptors.shadow_ubos_mapped[i] = null; + ctx.ui.ui_vbos[i] = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.descriptors.descriptor_sets[i] = null; + ctx.descriptors.lod_descriptor_sets[i] = null; + ctx.ui.ui_tex_descriptor_sets[i] = null; + ctx.ui.ui_tex_descriptor_next[i] = 0; + ctx.draw.bound_instance_buffer[i] = 0; + ctx.draw.bound_lod_instance_buffer[i] = 0; + for (0..ctx.ui.ui_tex_descriptor_pool[i].len) |j| { + ctx.ui.ui_tex_descriptor_pool[i][j] = null; + } + if (comptime build_options.debug_shadows) { + 
ctx.debug_shadow.descriptor_sets[i] = null; + ctx.debug_shadow.descriptor_next[i] = 0; + for (0..ctx.debug_shadow.descriptor_pool[i].len) |j| { + ctx.debug_shadow.descriptor_pool[i][j] = null; + } + } + ctx.resources.buffer_deletion_queue[i] = .empty; + ctx.resources.image_deletion_queue[i] = .empty; + } + ctx.legacy.model_ubo = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.legacy.dummy_instance_buffer = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }; + ctx.ui.ui_screen_width = 0; + ctx.ui.ui_screen_height = 0; + ctx.ui.ui_flushed_vertex_count = 0; + ctx.cloud.cloud_vao = null; + ctx.legacy.dummy_shadow_image = null; + ctx.legacy.dummy_shadow_memory = null; + ctx.legacy.dummy_shadow_view = null; + ctx.draw.current_model = Mat4.identity; + ctx.draw.current_color = .{ 1.0, 1.0, 1.0 }; + ctx.draw.current_mask_radius = 0; + + return rhi.RHI{ + .ptr = ctx, + .vtable = vtable, + .device = render_device, + }; +} diff --git a/src/engine/graphics/vulkan/rhi_context_types.zig b/src/engine/graphics/vulkan/rhi_context_types.zig new file mode 100644 index 00000000..1e7ec1ad --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_context_types.zig @@ -0,0 +1,216 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const RenderDevice = @import("../render_device.zig").RenderDevice; +const Mat4 = @import("../../math/mat4.zig").Mat4; +const build_options = @import("build_options"); + +const resource_manager_pkg = @import("resource_manager.zig"); +const ResourceManager = resource_manager_pkg.ResourceManager; +const VulkanBuffer = resource_manager_pkg.VulkanBuffer; +const FrameManager = @import("frame_manager.zig").FrameManager; +const SwapchainPresenter = @import("swapchain_presenter.zig").SwapchainPresenter; +const DescriptorManager = @import("descriptor_manager.zig").DescriptorManager; +const PipelineManager = @import("pipeline_manager.zig").PipelineManager; +const 
RenderPassManager = @import("render_pass_manager.zig").RenderPassManager; +const ShadowSystem = @import("shadow_system.zig").ShadowSystem; +const SSAOSystem = @import("ssao_system.zig").SSAOSystem; +const PostProcessSystem = @import("post_process_system.zig").PostProcessSystem; +const FXAASystem = @import("fxaa_system.zig").FXAASystem; +const BloomSystem = @import("bloom_system.zig").BloomSystem; +const VulkanDevice = @import("device.zig").VulkanDevice; + +const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT; + +const DebugShadowResources = if (build_options.debug_shadows) struct { + pipeline: ?c.VkPipeline = null, + pipeline_layout: ?c.VkPipelineLayout = null, + descriptor_set_layout: ?c.VkDescriptorSetLayout = null, + descriptor_sets: [MAX_FRAMES_IN_FLIGHT]?c.VkDescriptorSet = .{null} ** MAX_FRAMES_IN_FLIGHT, + descriptor_pool: [MAX_FRAMES_IN_FLIGHT][8]?c.VkDescriptorSet = .{.{null} ** 8} ** MAX_FRAMES_IN_FLIGHT, + descriptor_next: [MAX_FRAMES_IN_FLIGHT]u32 = .{0} ** MAX_FRAMES_IN_FLIGHT, + vbo: VulkanBuffer = .{ .buffer = null, .memory = null, .size = 0, .is_host_visible = false }, +} else struct {}; + +const GPassResources = struct { + g_normal_image: c.VkImage = null, + g_normal_memory: c.VkDeviceMemory = null, + g_normal_view: c.VkImageView = null, + g_normal_handle: rhi.TextureHandle = 0, + g_depth_image: c.VkImage = null, + g_depth_memory: c.VkDeviceMemory = null, + g_depth_view: c.VkImageView = null, + g_pass_extent: c.VkExtent2D = .{ .width = 0, .height = 0 }, +}; + +const CloudResources = struct { + cloud_vbo: VulkanBuffer = .{}, + cloud_ebo: VulkanBuffer = .{}, + cloud_mesh_size: f32 = 0.0, + cloud_vao: c.VkBuffer = null, +}; + +const HDRResources = struct { + hdr_image: c.VkImage = null, + hdr_memory: c.VkDeviceMemory = null, + hdr_view: c.VkImageView = null, + hdr_handle: rhi.TextureHandle = 0, + hdr_msaa_image: c.VkImage = null, + hdr_msaa_memory: c.VkDeviceMemory = null, + hdr_msaa_view: c.VkImageView = null, +}; + +const VelocityResources = struct 
{ + velocity_image: c.VkImage = null, + velocity_memory: c.VkDeviceMemory = null, + velocity_view: c.VkImageView = null, + velocity_handle: rhi.TextureHandle = 0, + view_proj_prev: Mat4 = Mat4.identity, +}; + +const UIState = struct { + ui_tex_descriptor_sets: [MAX_FRAMES_IN_FLIGHT]c.VkDescriptorSet = .{null} ** MAX_FRAMES_IN_FLIGHT, + ui_tex_descriptor_pool: [MAX_FRAMES_IN_FLIGHT][64]c.VkDescriptorSet = .{.{null} ** 64} ** MAX_FRAMES_IN_FLIGHT, + ui_tex_descriptor_next: [MAX_FRAMES_IN_FLIGHT]u32 = .{0} ** MAX_FRAMES_IN_FLIGHT, + ui_vbos: [MAX_FRAMES_IN_FLIGHT]VulkanBuffer = .{VulkanBuffer{}} ** MAX_FRAMES_IN_FLIGHT, + ui_screen_width: f32 = 0.0, + ui_screen_height: f32 = 0.0, + ui_using_swapchain: bool = false, + ui_in_progress: bool = false, + ui_vertex_offset: u64 = 0, + selection_mode: bool = false, + ui_flushed_vertex_count: u32 = 0, + ui_mapped_ptr: ?*anyopaque = null, +}; + +const LegacyResources = struct { + dummy_shadow_image: c.VkImage = null, + dummy_shadow_memory: c.VkDeviceMemory = null, + dummy_shadow_view: c.VkImageView = null, + model_ubo: VulkanBuffer = .{}, + dummy_instance_buffer: VulkanBuffer = .{}, + transfer_fence: c.VkFence = null, +}; + +const ShadowRuntime = struct { + shadow_map_handles: [rhi.SHADOW_CASCADE_COUNT]rhi.TextureHandle = .{0} ** rhi.SHADOW_CASCADE_COUNT, + shadow_texel_sizes: [rhi.SHADOW_CASCADE_COUNT]f32 = .{0.0} ** rhi.SHADOW_CASCADE_COUNT, + shadow_resolution: u32, +}; + +const PostProcessState = struct { + vignette_enabled: bool = false, + vignette_intensity: f32 = 0.3, + film_grain_enabled: bool = false, + film_grain_intensity: f32 = 0.15, + color_grading_enabled: bool = false, + color_grading_intensity: f32 = 1.0, +}; + +const RenderOptions = struct { + wireframe_enabled: bool = false, + textures_enabled: bool = true, + vsync_enabled: bool = true, + present_mode: c.VkPresentModeKHR = c.VK_PRESENT_MODE_FIFO_KHR, + anisotropic_filtering: u8 = 1, + msaa_samples: u8 = 1, + safe_mode: bool = false, + debug_shadows_active: bool 
= false, +}; + +const DrawState = struct { + current_texture: rhi.TextureHandle, + current_normal_texture: rhi.TextureHandle, + current_roughness_texture: rhi.TextureHandle, + current_displacement_texture: rhi.TextureHandle, + current_env_texture: rhi.TextureHandle, + current_lpv_texture: rhi.TextureHandle, + current_lpv_texture_g: rhi.TextureHandle, + current_lpv_texture_b: rhi.TextureHandle, + dummy_texture: rhi.TextureHandle, + dummy_texture_3d: rhi.TextureHandle, + dummy_normal_texture: rhi.TextureHandle, + dummy_roughness_texture: rhi.TextureHandle, + bound_texture: rhi.TextureHandle, + bound_normal_texture: rhi.TextureHandle, + bound_roughness_texture: rhi.TextureHandle, + bound_displacement_texture: rhi.TextureHandle, + bound_env_texture: rhi.TextureHandle, + bound_lpv_texture: rhi.TextureHandle, + bound_lpv_texture_g: rhi.TextureHandle = 0, + bound_lpv_texture_b: rhi.TextureHandle = 0, + bound_ssao_handle: rhi.TextureHandle = 0, + bound_shadow_views: [rhi.SHADOW_CASCADE_COUNT]c.VkImageView, + descriptors_dirty: [MAX_FRAMES_IN_FLIGHT]bool, + terrain_pipeline_bound: bool = false, + descriptors_updated: bool = false, + lod_mode: bool = false, + bound_instance_buffer: [MAX_FRAMES_IN_FLIGHT]rhi.BufferHandle = .{ 0, 0 }, + bound_lod_instance_buffer: [MAX_FRAMES_IN_FLIGHT]rhi.BufferHandle = .{ 0, 0 }, + pending_instance_buffer: rhi.BufferHandle = 0, + pending_lod_instance_buffer: rhi.BufferHandle = 0, + current_view_proj: Mat4 = Mat4.identity, + current_model: Mat4 = Mat4.identity, + current_color: [3]f32 = .{ 1.0, 1.0, 1.0 }, + current_mask_radius: f32 = 0.0, +}; + +const RuntimeState = struct { + gpu_fault_detected: bool = false, + memory_type_index: u32, + framebuffer_resized: bool, + draw_call_count: u32, + main_pass_active: bool = false, + g_pass_active: bool = false, + ssao_pass_active: bool = false, + post_process_ran_this_frame: bool = false, + fxaa_ran_this_frame: bool = false, + pipeline_rebuild_needed: bool = false, + frame_index: usize, + image_index: 
u32, + clear_color: [4]f32 = .{ 0.07, 0.08, 0.1, 1.0 }, +}; + +const TimingState = struct { + query_pool: c.VkQueryPool = null, + timing_enabled: bool = true, + timing_results: rhi.GpuTimingResults = undefined, +}; + +pub const VulkanContext = struct { + allocator: std.mem.Allocator, + window: *c.SDL_Window, + render_device: ?*RenderDevice, + + vulkan_device: VulkanDevice, + resources: ResourceManager, + frames: FrameManager, + swapchain: SwapchainPresenter, + descriptors: DescriptorManager, + + pipeline_manager: PipelineManager = .{}, + render_pass_manager: RenderPassManager = .{}, + + legacy: LegacyResources = .{}, + draw: DrawState, + options: RenderOptions = .{}, + gpass: GPassResources = .{}, + + shadow_system: ShadowSystem, + ssao_system: SSAOSystem = .{}, + shadow_runtime: ShadowRuntime, + runtime: RuntimeState, + mutex: std.Thread.Mutex = .{}, + + ui: UIState = .{}, + cloud: CloudResources = .{}, + hdr: HDRResources = .{}, + post_process: PostProcessSystem = .{}, + debug_shadow: DebugShadowResources = .{}, + fxaa: FXAASystem = .{}, + bloom: BloomSystem = .{}, + post_process_state: PostProcessState = .{}, + velocity: VelocityResources = .{}, + + timing: TimingState = .{}, +}; diff --git a/src/engine/graphics/vulkan/rhi_draw_submission.zig b/src/engine/graphics/vulkan/rhi_draw_submission.zig new file mode 100644 index 00000000..f7f4165d --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_draw_submission.zig @@ -0,0 +1,344 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Mat4 = @import("../../math/mat4.zig").Mat4; +const pass_orchestration = @import("rhi_pass_orchestration.zig"); + +const ModelUniforms = extern struct { + model: Mat4, + color: [3]f32, + mask_radius: f32, +}; + +const ShadowModelUniforms = extern struct { + mvp: Mat4, + bias_params: [4]f32, +}; + +pub fn drawIndexed(ctx: anytype, vbo_handle: rhi.BufferHandle, ebo_handle: rhi.BufferHandle, count: u32) void { + if 
(!ctx.frames.frame_in_progress) return; + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) pass_orchestration.beginMainPassInternal(ctx); + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) return; + + const vbo_opt = ctx.resources.buffers.get(vbo_handle); + const ebo_opt = ctx.resources.buffers.get(ebo_handle); + + if (vbo_opt) |vbo| { + if (ebo_opt) |ebo| { + ctx.runtime.draw_call_count += 1; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + if (!ctx.draw.terrain_pipeline_bound) { + const selected_pipeline = if (ctx.options.wireframe_enabled and ctx.pipeline_manager.wireframe_pipeline != null) + ctx.pipeline_manager.wireframe_pipeline + else + ctx.pipeline_manager.terrain_pipeline; + if (selected_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); + ctx.draw.terrain_pipeline_bound = true; + } + + const descriptor_set = if (ctx.draw.lod_mode) + &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] + else + &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, descriptor_set, 0, null); + + const offset: c.VkDeviceSize = 0; + c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset); + c.vkCmdBindIndexBuffer(command_buffer, ebo.buffer, 0, c.VK_INDEX_TYPE_UINT16); + c.vkCmdDrawIndexed(command_buffer, count, 1, 0, 0, 0); + } + } +} + +pub fn drawIndirect(ctx: anytype, handle: rhi.BufferHandle, command_buffer: rhi.BufferHandle, offset: usize, draw_count: u32, stride: u32) void { + if (!ctx.frames.frame_in_progress) return; + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) pass_orchestration.beginMainPassInternal(ctx); + + if (!ctx.runtime.main_pass_active and 
!ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) return; + + const use_shadow = ctx.shadow_system.pass_active; + const use_g_pass = ctx.runtime.g_pass_active; + + const vbo_opt = ctx.resources.buffers.get(handle); + const cmd_opt = ctx.resources.buffers.get(command_buffer); + + if (vbo_opt) |vbo| { + if (cmd_opt) |cmd| { + ctx.runtime.draw_call_count += 1; + const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; + + if (use_shadow) { + if (!ctx.shadow_system.pipeline_bound) { + if (ctx.shadow_system.shadow_pipeline == null) return; + c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); + ctx.shadow_system.pipeline_bound = true; + } + } else if (use_g_pass) { + if (ctx.pipeline_manager.g_pipeline == null) return; + c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.g_pipeline); + } else { + if (!ctx.draw.terrain_pipeline_bound) { + const selected_pipeline = if (ctx.options.wireframe_enabled and ctx.pipeline_manager.wireframe_pipeline != null) + ctx.pipeline_manager.wireframe_pipeline + else + ctx.pipeline_manager.terrain_pipeline; + if (selected_pipeline == null) { + std.log.warn("drawIndirect: main pipeline (selected_pipeline) is null - cannot draw terrain", .{}); + return; + } + c.vkCmdBindPipeline(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); + ctx.draw.terrain_pipeline_bound = true; + } + } + + const descriptor_set = if (!use_shadow and ctx.draw.lod_mode) + &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] + else + &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + c.vkCmdBindDescriptorSets(cb, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, descriptor_set, 0, null); + + if (use_shadow) { + const cascade_index = ctx.shadow_system.pass_index; + const texel_size = ctx.shadow_runtime.shadow_texel_sizes[cascade_index]; + const shadow_uniforms = ShadowModelUniforms{ + .mvp = ctx.shadow_system.pass_matrix, + 
.bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, + }; + c.vkCmdPushConstants(cb, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); + } else { + const uniforms = ModelUniforms{ + .model = Mat4.identity, + .color = .{ 1.0, 1.0, 1.0 }, + .mask_radius = 0, + }; + c.vkCmdPushConstants(cb, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); + } + + const offset_vals = [_]c.VkDeviceSize{0}; + c.vkCmdBindVertexBuffers(cb, 0, 1, &vbo.buffer, &offset_vals); + + if (cmd.is_host_visible and draw_count > 0 and stride > 0) { + const stride_bytes: usize = @intCast(stride); + const map_size: usize = @as(usize, @intCast(draw_count)) * stride_bytes; + const cmd_size: usize = @intCast(cmd.size); + if (offset <= cmd_size and map_size <= cmd_size - offset) { + if (cmd.mapped_ptr) |ptr| { + const base = @as([*]const u8, @ptrCast(ptr)) + offset; + var draw_index: u32 = 0; + while (draw_index < draw_count) : (draw_index += 1) { + const cmd_ptr = @as(*const rhi.DrawIndirectCommand, @ptrCast(@alignCast(base + @as(usize, draw_index) * stride_bytes))); + const draw_cmd = cmd_ptr.*; + if (draw_cmd.vertexCount == 0 or draw_cmd.instanceCount == 0) continue; + c.vkCmdDraw(cb, draw_cmd.vertexCount, draw_cmd.instanceCount, draw_cmd.firstVertex, draw_cmd.firstInstance); + } + return; + } + } else { + std.log.warn("drawIndirect: command buffer range out of bounds (offset={}, size={}, buffer={})", .{ offset, map_size, cmd_size }); + } + } + + if (ctx.vulkan_device.multi_draw_indirect) { + c.vkCmdDrawIndirect(cb, cmd.buffer, @intCast(offset), draw_count, stride); + } else { + const stride_bytes: usize = @intCast(stride); + var draw_index: u32 = 0; + while (draw_index < draw_count) : (draw_index += 1) { + const draw_offset = offset + @as(usize, draw_index) * stride_bytes; + 
c.vkCmdDrawIndirect(cb, cmd.buffer, @intCast(draw_offset), 1, stride); + } + std.log.info("drawIndirect: MDI unsupported - drew {} draws via single-draw fallback", .{draw_count}); + } + } + } +} + +pub fn drawInstance(ctx: anytype, handle: rhi.BufferHandle, count: u32, instance_index: u32) void { + if (!ctx.frames.frame_in_progress) return; + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) pass_orchestration.beginMainPassInternal(ctx); + + const use_shadow = ctx.shadow_system.pass_active; + const use_g_pass = ctx.runtime.g_pass_active; + + const vbo_opt = ctx.resources.buffers.get(handle); + + if (vbo_opt) |vbo| { + ctx.runtime.draw_call_count += 1; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + if (use_shadow) { + if (!ctx.shadow_system.pipeline_bound) { + if (ctx.shadow_system.shadow_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); + ctx.shadow_system.pipeline_bound = true; + } + } else if (use_g_pass) { + if (ctx.pipeline_manager.g_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.g_pipeline); + } else { + if (!ctx.draw.terrain_pipeline_bound) { + const selected_pipeline = if (ctx.options.wireframe_enabled and ctx.pipeline_manager.wireframe_pipeline != null) + ctx.pipeline_manager.wireframe_pipeline + else + ctx.pipeline_manager.terrain_pipeline; + if (selected_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); + ctx.draw.terrain_pipeline_bound = true; + } + } + + const descriptor_set = if (!use_shadow and ctx.draw.lod_mode) + &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] + else + &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, 
ctx.pipeline_manager.pipeline_layout, 0, 1, descriptor_set, 0, null); + + if (use_shadow) { + const cascade_index = ctx.shadow_system.pass_index; + const texel_size = ctx.shadow_runtime.shadow_texel_sizes[cascade_index]; + const shadow_uniforms = ShadowModelUniforms{ + .mvp = ctx.shadow_system.pass_matrix.multiply(ctx.draw.current_model), + .bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, + }; + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); + } else { + const uniforms = ModelUniforms{ + .model = Mat4.identity, + .color = .{ 1.0, 1.0, 1.0 }, + .mask_radius = 0, + }; + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); + } + + const offset: c.VkDeviceSize = 0; + c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset); + c.vkCmdDraw(command_buffer, count, 1, 0, instance_index); + } +} + +pub fn drawOffset(ctx: anytype, handle: rhi.BufferHandle, count: u32, mode: rhi.DrawMode, offset: usize) void { + if (!ctx.frames.frame_in_progress) return; + + if (ctx.post_process.pass_active) { + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdDraw(command_buffer, count, 1, 0, 0); + ctx.runtime.draw_call_count += 1; + return; + } + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) pass_orchestration.beginMainPassInternal(ctx); + + if (!ctx.runtime.main_pass_active and !ctx.shadow_system.pass_active and !ctx.runtime.g_pass_active) return; + + const use_shadow = ctx.shadow_system.pass_active; + const use_g_pass = ctx.runtime.g_pass_active; + + const vbo_opt = ctx.resources.buffers.get(handle); + + if (vbo_opt) |vbo| { + const vertex_stride: u64 = @sizeOf(rhi.Vertex); + const required_bytes: u64 
= @as(u64, offset) + @as(u64, count) * vertex_stride; + if (required_bytes > vbo.size) { + std.log.err("drawOffset: vertex buffer overrun (handle={}, offset={}, count={}, size={})", .{ handle, offset, count, vbo.size }); + return; + } + + ctx.runtime.draw_call_count += 1; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + if (use_shadow) { + if (!ctx.shadow_system.pipeline_bound) { + if (ctx.shadow_system.shadow_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.shadow_system.shadow_pipeline); + ctx.shadow_system.pipeline_bound = true; + } + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, &ctx.descriptors.descriptor_sets[ctx.frames.current_frame], 0, null); + } else if (use_g_pass) { + if (ctx.pipeline_manager.g_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.g_pipeline); + + const descriptor_set = if (ctx.draw.lod_mode) + &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] + else + &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, descriptor_set, 0, null); + } else { + const needs_rebinding = !ctx.draw.terrain_pipeline_bound or ctx.ui.selection_mode or mode == .lines; + if (needs_rebinding) { + const selected_pipeline = if (ctx.ui.selection_mode and ctx.pipeline_manager.selection_pipeline != null) + ctx.pipeline_manager.selection_pipeline + else if (mode == .lines and ctx.pipeline_manager.line_pipeline != null) + ctx.pipeline_manager.line_pipeline + else if (ctx.options.wireframe_enabled and ctx.pipeline_manager.wireframe_pipeline != null) + ctx.pipeline_manager.wireframe_pipeline + else + ctx.pipeline_manager.terrain_pipeline; + if (selected_pipeline == null) return; + 
c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, selected_pipeline); + ctx.draw.terrain_pipeline_bound = (selected_pipeline == ctx.pipeline_manager.terrain_pipeline); + } + + const descriptor_set = if (ctx.draw.lod_mode) + &ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame] + else + &ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, descriptor_set, 0, null); + } + + if (use_shadow) { + const cascade_index = ctx.shadow_system.pass_index; + const texel_size = ctx.shadow_runtime.shadow_texel_sizes[cascade_index]; + const shadow_uniforms = ShadowModelUniforms{ + .mvp = ctx.shadow_system.pass_matrix.multiply(ctx.draw.current_model), + .bias_params = .{ 2.0, 1.0, @floatFromInt(cascade_index), texel_size }, + }; + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ShadowModelUniforms), &shadow_uniforms); + } else { + const uniforms = ModelUniforms{ + .model = ctx.draw.current_model, + .color = ctx.draw.current_color, + .mask_radius = ctx.draw.current_mask_radius, + }; + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(ModelUniforms), &uniforms); + } + + const offset_vbo: c.VkDeviceSize = @intCast(offset); + c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &vbo.buffer, &offset_vbo); + c.vkCmdDraw(command_buffer, count, 1, 0, 0); + } +} + +pub fn bindBuffer(ctx: anytype, handle: rhi.BufferHandle, usage: rhi.BufferUsage) void { + if (!ctx.frames.frame_in_progress) return; + + const buf_opt = ctx.resources.buffers.get(handle); + + if (buf_opt) |buf| { + const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; + const offset: c.VkDeviceSize = 0; + switch (usage) { + .vertex => c.vkCmdBindVertexBuffers(cb, 0, 1, 
&buf.buffer, &offset), + .index => c.vkCmdBindIndexBuffer(cb, buf.buffer, 0, c.VK_INDEX_TYPE_UINT16), + else => {}, + } + } +} + +pub fn pushConstants(ctx: anytype, stages: rhi.ShaderStageFlags, offset: u32, size: u32, data: *const anyopaque) void { + if (!ctx.frames.frame_in_progress) return; + + var vk_stages: c.VkShaderStageFlags = 0; + if (stages.vertex) vk_stages |= c.VK_SHADER_STAGE_VERTEX_BIT; + if (stages.fragment) vk_stages |= c.VK_SHADER_STAGE_FRAGMENT_BIT; + if (stages.compute) vk_stages |= c.VK_SHADER_STAGE_COMPUTE_BIT; + + const cb = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdPushConstants(cb, ctx.pipeline_manager.pipeline_layout, vk_stages, offset, size, data); +} diff --git a/src/engine/graphics/vulkan/rhi_frame_orchestration.zig b/src/engine/graphics/vulkan/rhi_frame_orchestration.zig new file mode 100644 index 00000000..232a2993 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_frame_orchestration.zig @@ -0,0 +1,284 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const build_options = @import("build_options"); +const bindings = @import("descriptor_bindings.zig"); +const lifecycle = @import("rhi_resource_lifecycle.zig"); +const setup = @import("rhi_resource_setup.zig"); + +pub fn recreateSwapchainInternal(ctx: anytype) void { + _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); + + var w: c_int = 0; + var h: c_int = 0; + _ = c.SDL_GetWindowSizeInPixels(ctx.window, &w, &h); + if (w == 0 or h == 0) return; + + setup.destroyMainRenderPassAndPipelines(ctx); + lifecycle.destroyHDRResources(ctx); + lifecycle.destroyFXAAResources(ctx); + lifecycle.destroyBloomResources(ctx); + lifecycle.destroyPostProcessResources(ctx); + lifecycle.destroyGPassResources(ctx); + + ctx.runtime.main_pass_active = false; + ctx.shadow_system.pass_active = false; + ctx.runtime.g_pass_active = false; + ctx.runtime.ssao_pass_active = false; + + ctx.swapchain.recreate() catch |err| { + 
std.log.err("Failed to recreate swapchain: {}", .{err}); + return; + }; + + lifecycle.createHDRResources(ctx) catch |err| { + std.log.err("Failed to recreate HDR resources: {}", .{err}); + return; + }; + setup.createGPassResources(ctx) catch |err| { + std.log.err("Failed to recreate G-Pass resources: {}", .{err}); + return; + }; + setup.createSSAOResources(ctx) catch |err| { + std.log.err("Failed to recreate SSAO resources: {}", .{err}); + return; + }; + ctx.render_pass_manager.createMainRenderPass(ctx.vulkan_device.vk_device, ctx.swapchain.getExtent(), ctx.options.msaa_samples) catch |err| { + std.log.err("Failed to recreate render pass: {}", .{err}); + return; + }; + ctx.pipeline_manager.createMainPipelines(ctx.allocator, ctx.vulkan_device.vk_device, ctx.render_pass_manager.hdr_render_pass, ctx.render_pass_manager.g_render_pass, ctx.options.msaa_samples) catch |err| { + std.log.err("Failed to recreate pipelines: {}", .{err}); + return; + }; + setup.createPostProcessResources(ctx) catch |err| { + std.log.err("Failed to recreate post-process resources: {}", .{err}); + return; + }; + setup.createSwapchainUIResources(ctx) catch |err| { + std.log.err("Failed to recreate swapchain UI resources: {}", .{err}); + return; + }; + ctx.fxaa.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.swapchain.getExtent(), ctx.swapchain.getImageFormat(), ctx.post_process.sampler, ctx.swapchain.getImageViews()) catch |err| { + std.log.err("Failed to recreate FXAA resources: {}", .{err}); + return; + }; + ctx.pipeline_manager.createSwapchainUIPipelines(ctx.allocator, ctx.vulkan_device.vk_device, ctx.render_pass_manager.ui_swapchain_render_pass) catch |err| { + std.log.err("Failed to recreate swapchain UI pipelines: {}", .{err}); + return; + }; + ctx.bloom.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.hdr.hdr_view, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height, c.VK_FORMAT_R16G16B16A16_SFLOAT) catch |err| { + 
std.log.err("Failed to recreate Bloom resources: {}", .{err}); + return; + }; + setup.updatePostProcessDescriptorsWithBloom(ctx); + + { + var list: [32]c.VkImage = undefined; + var count: usize = 0; + const candidates = [_]c.VkImage{ ctx.hdr.hdr_image, ctx.gpass.g_normal_image, ctx.ssao_system.image, ctx.ssao_system.blur_image, ctx.ssao_system.noise_image, ctx.velocity.velocity_image }; + for (candidates) |img| { + if (img != null) { + list[count] = img; + count += 1; + } + } + for (ctx.bloom.mip_images) |img| { + if (img != null) { + list[count] = img; + count += 1; + } + } + + if (count > 0) { + lifecycle.transitionImagesToShaderRead(ctx, list[0..count], false) catch |err| std.log.warn("Failed to transition images: {}", .{err}); + } + + if (ctx.gpass.g_depth_image != null) { + lifecycle.transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.gpass.g_depth_image}, true) catch |err| std.log.warn("Failed to transition G-depth image: {}", .{err}); + } + if (ctx.shadow_system.shadow_image != null) { + lifecycle.transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.shadow_system.shadow_image}, true) catch |err| std.log.warn("Failed to transition Shadow image: {}", .{err}); + for (0..rhi.SHADOW_CASCADE_COUNT) |i| { + ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + } + } + } + + ctx.runtime.framebuffer_resized = false; + ctx.runtime.pipeline_rebuild_needed = false; +} + +pub fn prepareFrameState(ctx: anytype) void { + ctx.runtime.draw_call_count = 0; + ctx.runtime.main_pass_active = false; + ctx.shadow_system.pass_active = false; + ctx.runtime.post_process_ran_this_frame = false; + ctx.runtime.fxaa_ran_this_frame = false; + ctx.ui.ui_using_swapchain = false; + + ctx.draw.terrain_pipeline_bound = false; + ctx.shadow_system.pipeline_bound = false; + ctx.draw.descriptors_updated = false; + ctx.draw.bound_texture = 0; + + const command_buffer = ctx.frames.getCurrentCommandBuffer(); + + var mem_barrier = std.mem.zeroes(c.VkMemoryBarrier); + 
mem_barrier.sType = c.VK_STRUCTURE_TYPE_MEMORY_BARRIER; + mem_barrier.srcAccessMask = c.VK_ACCESS_HOST_WRITE_BIT | c.VK_ACCESS_TRANSFER_WRITE_BIT; + mem_barrier.dstAccessMask = c.VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | c.VK_ACCESS_INDEX_READ_BIT | c.VK_ACCESS_SHADER_READ_BIT | c.VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + c.vkCmdPipelineBarrier( + command_buffer, + c.VK_PIPELINE_STAGE_HOST_BIT | c.VK_PIPELINE_STAGE_TRANSFER_BIT, + c.VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | c.VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | c.VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, + 0, + 1, + &mem_barrier, + 0, + null, + 0, + null, + ); + + ctx.ui.ui_vertex_offset = 0; + ctx.ui.ui_flushed_vertex_count = 0; + ctx.ui.ui_tex_descriptor_next[ctx.frames.current_frame] = 0; + if (comptime build_options.debug_shadows) { + ctx.debug_shadow.descriptor_next[ctx.frames.current_frame] = 0; + } + + const cur_tex = ctx.draw.current_texture; + const cur_nor = ctx.draw.current_normal_texture; + const cur_rou = ctx.draw.current_roughness_texture; + const cur_dis = ctx.draw.current_displacement_texture; + const cur_env = ctx.draw.current_env_texture; + const cur_lpv = ctx.draw.current_lpv_texture; + const cur_lpv_g = ctx.draw.current_lpv_texture_g; + const cur_lpv_b = ctx.draw.current_lpv_texture_b; + + var needs_update = false; + if (ctx.draw.bound_texture != cur_tex) needs_update = true; + if (ctx.draw.bound_normal_texture != cur_nor) needs_update = true; + if (ctx.draw.bound_roughness_texture != cur_rou) needs_update = true; + if (ctx.draw.bound_displacement_texture != cur_dis) needs_update = true; + if (ctx.draw.bound_env_texture != cur_env) needs_update = true; + if (ctx.draw.bound_lpv_texture != cur_lpv) needs_update = true; + if (ctx.draw.bound_lpv_texture_g != cur_lpv_g) needs_update = true; + if (ctx.draw.bound_lpv_texture_b != cur_lpv_b) needs_update = true; + + for (0..rhi.SHADOW_CASCADE_COUNT) |si| { + if (ctx.draw.bound_shadow_views[si] != ctx.shadow_system.shadow_image_views[si]) needs_update = true; + } + + 
if (needs_update) { + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| ctx.draw.descriptors_dirty[i] = true; + ctx.draw.bound_texture = cur_tex; + ctx.draw.bound_normal_texture = cur_nor; + ctx.draw.bound_roughness_texture = cur_rou; + ctx.draw.bound_displacement_texture = cur_dis; + ctx.draw.bound_env_texture = cur_env; + ctx.draw.bound_lpv_texture = cur_lpv; + ctx.draw.bound_lpv_texture_g = cur_lpv_g; + ctx.draw.bound_lpv_texture_b = cur_lpv_b; + for (0..rhi.SHADOW_CASCADE_COUNT) |si| ctx.draw.bound_shadow_views[si] = ctx.shadow_system.shadow_image_views[si]; + } + + if (ctx.draw.descriptors_dirty[ctx.frames.current_frame]) { + if (ctx.descriptors.descriptor_sets[ctx.frames.current_frame] == null) { + std.log.err("CRITICAL: Descriptor set for frame {} is NULL!", .{ctx.frames.current_frame}); + return; + } + var writes: [14]c.VkWriteDescriptorSet = undefined; + var write_count: u32 = 0; + var image_infos: [14]c.VkDescriptorImageInfo = undefined; + var info_count: u32 = 0; + + const dummy_tex_entry = ctx.resources.textures.get(ctx.draw.dummy_texture); + const dummy_tex_3d_entry = ctx.resources.textures.get(ctx.draw.dummy_texture_3d); + + const atlas_slots = [_]struct { handle: rhi.TextureHandle, binding: u32, is_3d: bool }{ + .{ .handle = cur_tex, .binding = bindings.ALBEDO_TEXTURE, .is_3d = false }, + .{ .handle = cur_nor, .binding = bindings.NORMAL_TEXTURE, .is_3d = false }, + .{ .handle = cur_rou, .binding = bindings.ROUGHNESS_TEXTURE, .is_3d = false }, + .{ .handle = cur_dis, .binding = bindings.DISPLACEMENT_TEXTURE, .is_3d = false }, + .{ .handle = cur_env, .binding = bindings.ENV_TEXTURE, .is_3d = false }, + .{ .handle = cur_lpv, .binding = bindings.LPV_TEXTURE, .is_3d = true }, + .{ .handle = cur_lpv_g, .binding = bindings.LPV_TEXTURE_G, .is_3d = true }, + .{ .handle = cur_lpv_b, .binding = bindings.LPV_TEXTURE_B, .is_3d = true }, + }; + + for (atlas_slots) |slot| { + const fallback = if (slot.is_3d) dummy_tex_3d_entry else dummy_tex_entry; + const entry = 
ctx.resources.textures.get(slot.handle) orelse fallback; + if (entry) |tex| { + image_infos[info_count] = .{ + .sampler = tex.sampler, + .imageView = tex.view, + .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + }; + writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[write_count].dstSet = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + writes[write_count].dstBinding = slot.binding; + writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + writes[write_count].descriptorCount = 1; + writes[write_count].pImageInfo = &image_infos[info_count]; + write_count += 1; + info_count += 1; + } + } + + if (ctx.shadow_system.shadow_sampler == null) { + std.log.err("CRITICAL: Shadow sampler is NULL!", .{}); + } + if (ctx.shadow_system.shadow_sampler_regular == null) { + std.log.err("CRITICAL: Shadow regular sampler is NULL!", .{}); + } + if (ctx.shadow_system.shadow_image_view == null) { + std.log.err("CRITICAL: Shadow image view is NULL!", .{}); + } + image_infos[info_count] = .{ + .sampler = ctx.shadow_system.shadow_sampler, + .imageView = ctx.shadow_system.shadow_image_view, + .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + }; + writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[write_count].dstSet = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + writes[write_count].dstBinding = bindings.SHADOW_COMPARE_TEXTURE; + writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + writes[write_count].descriptorCount = 1; + writes[write_count].pImageInfo = &image_infos[info_count]; + write_count += 1; + info_count += 1; + + image_infos[info_count] = .{ + .sampler = if (ctx.shadow_system.shadow_sampler_regular != null) ctx.shadow_system.shadow_sampler_regular else ctx.shadow_system.shadow_sampler, + 
.imageView = ctx.shadow_system.shadow_image_view, + .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + }; + writes[write_count] = std.mem.zeroes(c.VkWriteDescriptorSet); + writes[write_count].sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writes[write_count].dstSet = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + writes[write_count].dstBinding = bindings.SHADOW_REGULAR_TEXTURE; + writes[write_count].descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + writes[write_count].descriptorCount = 1; + writes[write_count].pImageInfo = &image_infos[info_count]; + write_count += 1; + info_count += 1; + + if (write_count > 0) { + c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, write_count, &writes[0], 0, null); + + for (0..write_count) |i| { + writes[i].dstSet = ctx.descriptors.lod_descriptor_sets[ctx.frames.current_frame]; + } + c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, write_count, &writes[0], 0, null); + } + + ctx.draw.descriptors_dirty[ctx.frames.current_frame] = false; + } + + ctx.draw.descriptors_updated = true; +} diff --git a/src/engine/graphics/vulkan/rhi_init_deinit.zig b/src/engine/graphics/vulkan/rhi_init_deinit.zig new file mode 100644 index 00000000..39242791 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_init_deinit.zig @@ -0,0 +1,256 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const RenderDevice = @import("../render_device.zig").RenderDevice; +const VulkanDevice = @import("device.zig").VulkanDevice; +const ResourceManager = @import("resource_manager.zig").ResourceManager; +const FrameManager = @import("frame_manager.zig").FrameManager; +const SwapchainPresenter = @import("swapchain_presenter.zig").SwapchainPresenter; +const DescriptorManager = @import("descriptor_manager.zig").DescriptorManager; +const PipelineManager = @import("pipeline_manager.zig").PipelineManager; +const RenderPassManager = 
@import("render_pass_manager.zig").RenderPassManager;
const ShadowSystem = @import("shadow_system.zig").ShadowSystem;
const Utils = @import("utils.zig");
const lifecycle = @import("rhi_resource_lifecycle.zig");
const setup = @import("rhi_resource_setup.zig");
const rhi_timing = @import("rhi_timing.zig");

const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT;
// One timestamp-query range per in-flight frame.
const TOTAL_QUERY_COUNT = rhi_timing.QUERY_COUNT_PER_FRAME * MAX_FRAMES_IN_FLIGHT;

/// Initializes the full Vulkan rendering context in dependency order:
/// device -> resource/frame/swapchain/descriptor managers -> pipelines ->
/// offscreen resources (shadow/HDR/G-pass/SSAO) -> post-process/FXAA/bloom ->
/// per-frame UI buffers and descriptor sets -> initial image-layout
/// transitions -> timestamp query pool.
/// `ctx` is `anytype`: the concrete context struct is declared elsewhere.
/// Statement order matters — resources must exist before the render passes,
/// pipelines, and descriptor updates that reference them.
pub fn initContext(ctx: anytype, allocator: std.mem.Allocator, render_device: ?*RenderDevice) !void {
    ctx.allocator = allocator;
    ctx.render_device = render_device;

    // Core Vulkan objects. Each manager depends on the ones created before it.
    ctx.vulkan_device = try VulkanDevice.init(allocator, ctx.window);
    ctx.vulkan_device.initDebugMessenger();
    ctx.resources = try ResourceManager.init(allocator, &ctx.vulkan_device);
    ctx.frames = try FrameManager.init(&ctx.vulkan_device);
    ctx.swapchain = try SwapchainPresenter.init(allocator, &ctx.vulkan_device, ctx.window, ctx.options.msaa_samples);
    ctx.descriptors = try DescriptorManager.init(allocator, &ctx.vulkan_device, &ctx.resources);

    ctx.pipeline_manager = try PipelineManager.init(&ctx.vulkan_device, &ctx.descriptors, null);
    ctx.render_pass_manager = RenderPassManager.init(ctx.allocator);

    ctx.shadow_system = try ShadowSystem.init(allocator, ctx.shadow_runtime.shadow_resolution);

    // Reset all per-frame / per-draw runtime state to known defaults before
    // any frame is recorded.
    ctx.legacy.dummy_shadow_image = null;
    ctx.legacy.dummy_shadow_memory = null;
    ctx.legacy.dummy_shadow_view = null;
    ctx.runtime.clear_color = .{ 0.07, 0.08, 0.1, 1.0 };
    ctx.frames.frame_in_progress = false;
    ctx.runtime.main_pass_active = false;
    ctx.shadow_system.pass_active = false;
    ctx.shadow_system.pass_index = 0;
    ctx.ui.ui_in_progress = false;
    ctx.ui.ui_mapped_ptr = null;
    ctx.ui.ui_vertex_offset = 0;

    ctx.draw.terrain_pipeline_bound = false;
    ctx.shadow_system.pipeline_bound = false;
    ctx.draw.descriptors_updated = false;
    ctx.draw.bound_texture = 0;
    ctx.draw.bound_normal_texture = 0;
    ctx.draw.bound_roughness_texture = 0;
    ctx.draw.bound_displacement_texture = 0;
    ctx.draw.bound_env_texture = 0;
    ctx.draw.current_mask_radius = 0;
    ctx.draw.lod_mode = false;
    ctx.draw.pending_instance_buffer = 0;
    ctx.draw.pending_lod_instance_buffer = 0;

    ctx.options.wireframe_enabled = false;
    ctx.options.textures_enabled = true;
    ctx.options.vsync_enabled = true;
    ctx.options.present_mode = c.VK_PRESENT_MODE_FIFO_KHR;

    // Safe mode is opt-in via env var; any value other than "0"/"false" enables it.
    const safe_mode_env = std.posix.getenv("ZIGCRAFT_SAFE_MODE");
    ctx.options.safe_mode = if (safe_mode_env) |val|
        !(std.mem.eql(u8, val, "0") or std.mem.eql(u8, val, "false"))
    else
        false;
    if (ctx.options.safe_mode) {
        std.log.warn("ZIGCRAFT_SAFE_MODE enabled: throttling uploads and forcing GPU idle each frame", .{});
    }

    // Offscreen render targets must exist before the render passes and
    // pipelines that attach to them.
    try setup.createShadowResources(ctx);
    try lifecycle.createHDRResources(ctx);
    try setup.createGPassResources(ctx);
    try setup.createSSAOResources(ctx);

    try ctx.render_pass_manager.createMainRenderPass(
        ctx.vulkan_device.vk_device,
        ctx.swapchain.getExtent(),
        ctx.options.msaa_samples,
    );

    try ctx.pipeline_manager.createMainPipelines(
        ctx.allocator,
        ctx.vulkan_device.vk_device,
        ctx.render_pass_manager.hdr_render_pass,
        ctx.render_pass_manager.g_render_pass,
        ctx.options.msaa_samples,
    );

    try setup.createPostProcessResources(ctx);
    try setup.createSwapchainUIResources(ctx);

    // FXAA samples the post-process output; bloom samples the HDR target.
    try ctx.fxaa.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.swapchain.getExtent(), ctx.swapchain.getImageFormat(), ctx.post_process.sampler, ctx.swapchain.getImageViews());
    try ctx.pipeline_manager.createSwapchainUIPipelines(ctx.allocator, ctx.vulkan_device.vk_device, ctx.render_pass_manager.ui_swapchain_render_pass);
    try ctx.bloom.init(&ctx.vulkan_device, ctx.allocator, ctx.descriptors.descriptor_pool, ctx.hdr.hdr_view, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height, c.VK_FORMAT_R16G16B16A16_SFLOAT);

    setup.updatePostProcessDescriptorsWithBloom(ctx);

    // Seed all "current" texture slots with the dummy textures so draws are
    // valid before any real texture is bound.
    ctx.draw.dummy_texture = ctx.descriptors.dummy_texture;
    ctx.draw.dummy_texture_3d = ctx.descriptors.dummy_texture_3d;
    ctx.draw.dummy_normal_texture = ctx.descriptors.dummy_normal_texture;
    ctx.draw.dummy_roughness_texture = ctx.descriptors.dummy_roughness_texture;
    ctx.draw.current_texture = ctx.draw.dummy_texture;
    ctx.draw.current_normal_texture = ctx.draw.dummy_normal_texture;
    ctx.draw.current_roughness_texture = ctx.draw.dummy_roughness_texture;
    // NOTE(review): displacement deliberately falls back to the roughness dummy
    // (no dedicated displacement dummy is visible here) — confirm intended.
    ctx.draw.current_displacement_texture = ctx.draw.dummy_roughness_texture;
    ctx.draw.current_env_texture = ctx.draw.dummy_texture;
    ctx.draw.current_lpv_texture = ctx.draw.dummy_texture_3d;
    ctx.draw.current_lpv_texture_g = ctx.draw.dummy_texture_3d;
    ctx.draw.current_lpv_texture_b = ctx.draw.dummy_texture_3d;

    // Tiny vertex buffer used by the cloud pass (8 floats).
    const cloud_vbo_handle = try ctx.resources.createBuffer(8 * @sizeOf(f32), .vertex);
    std.log.info("Cloud VBO handle: {}, map count: {}", .{ cloud_vbo_handle, ctx.resources.buffers.count() });
    if (cloud_vbo_handle == 0) {
        std.log.err("Failed to create cloud VBO", .{});
        return error.InitializationFailed;
    }
    const cloud_buf = ctx.resources.buffers.get(cloud_vbo_handle);
    if (cloud_buf == null) {
        std.log.err("Cloud VBO created but not found in map!", .{});
        return error.InitializationFailed;
    }
    ctx.cloud.cloud_vbo = cloud_buf.?;

    // One 1 MiB host-visible UI vertex buffer per in-flight frame.
    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        ctx.ui.ui_vbos[i] = try Utils.createVulkanBuffer(&ctx.vulkan_device, 1024 * 1024, c.VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, c.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | c.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    }

    // Pre-allocate 64 UI texture descriptor sets per frame. Allocation
    // failures are logged but not fatal; the "maxSets=1000" in the message is
    // a hard-coded hint, not the pool's actual state — TODO report real state.
    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        ctx.draw.descriptors_dirty[i] = true;
        for (0..64) |j| {
            var alloc_info = std.mem.zeroes(c.VkDescriptorSetAllocateInfo);
            alloc_info.sType = c.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
            alloc_info.descriptorPool = ctx.descriptors.descriptor_pool;
            alloc_info.descriptorSetCount = 1;
            alloc_info.pSetLayouts = &ctx.pipeline_manager.ui_tex_descriptor_set_layout;
            const result = c.vkAllocateDescriptorSets(ctx.vulkan_device.vk_device, &alloc_info, &ctx.ui.ui_tex_descriptor_pool[i][j]);
            if (result != c.VK_SUCCESS) {
                std.log.err("Failed to allocate UI texture descriptor set [{}][{}]: error {}. Pool state: maxSets={}, available may be exhausted by FXAA+Bloom+UI", .{ i, j, result, @as(u32, 1000) });
            }
        }
        ctx.ui.ui_tex_descriptor_next[i] = 0;
    }

    try ctx.resources.flushTransfer();
    ctx.resources.setCurrentFrame(0);

    // Transition the shadow map (and record per-cascade layouts) so the first
    // frame can sample it without a validation error.
    if (ctx.shadow_system.shadow_image != null) {
        try lifecycle.transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.shadow_system.shadow_image}, true);
        for (0..rhi.SHADOW_CASCADE_COUNT) |i| {
            ctx.shadow_system.shadow_image_layouts[i] = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        }
    }

    // Batch-transition every other sampled offscreen image that exists.
    // Failures here are logged, not fatal: the passes re-establish layouts.
    {
        var list: [32]c.VkImage = undefined;
        var count: usize = 0;
        const candidates = [_]c.VkImage{ ctx.hdr.hdr_image, ctx.gpass.g_normal_image, ctx.ssao_system.image, ctx.ssao_system.blur_image, ctx.ssao_system.noise_image, ctx.velocity.velocity_image };
        for (candidates) |img| {
            if (img != null) {
                list[count] = img;
                count += 1;
            }
        }
        for (ctx.bloom.mip_images) |img| {
            if (img != null) {
                list[count] = img;
                count += 1;
            }
        }

        if (count > 0) {
            lifecycle.transitionImagesToShaderRead(ctx, list[0..count], false) catch |err| std.log.err("Failed to transition images during init: {}", .{err});
        }

        // Depth image needs the depth-aspect (`true`) variant of the transition.
        if (ctx.gpass.g_depth_image != null) {
            lifecycle.transitionImagesToShaderRead(ctx, &[_]c.VkImage{ctx.gpass.g_depth_image}, true) catch |err| std.log.err("Failed to transition G-depth image during init: {}", .{err});
        }
    }

    // Timestamp query pool for GPU timing (rhi_timing.zig reads it back).
    var query_pool_info = std.mem.zeroes(c.VkQueryPoolCreateInfo);
    query_pool_info.sType = c.VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_info.queryType = c.VK_QUERY_TYPE_TIMESTAMP;
    query_pool_info.queryCount = TOTAL_QUERY_COUNT;
    try Utils.checkVk(c.vkCreateQueryPool(ctx.vulkan_device.vk_device, &query_pool_info, null, &ctx.timing.query_pool));
}

/// Tears down the context in reverse dependency order. Waits for the GPU to
/// go idle before destroying anything; every destroy is guarded by a null
/// check so a partially-initialized context can be deinitialized safely.
/// Always frees the context allocation itself, even if the device was null.
pub fn deinit(ctx: anytype) void {
    const vk_device: c.VkDevice = ctx.vulkan_device.vk_device;

    if (vk_device != null) {
        // No command buffer may still reference the resources we destroy below.
        _ = c.vkDeviceWaitIdle(vk_device);

        if (ctx.render_pass_manager.main_framebuffer != null) {
            c.vkDestroyFramebuffer(vk_device, ctx.render_pass_manager.main_framebuffer, null);
            ctx.render_pass_manager.main_framebuffer = null;
        }

        ctx.pipeline_manager.deinit(vk_device);
        ctx.render_pass_manager.deinit(vk_device);

        lifecycle.destroyHDRResources(ctx);
        lifecycle.destroyFXAAResources(ctx);
        lifecycle.destroyBloomResources(ctx);
        lifecycle.destroyVelocityResources(ctx);
        lifecycle.destroyPostProcessResources(ctx);
        lifecycle.destroyGPassResources(ctx);

        const device = ctx.vulkan_device.vk_device;
        {
            // Legacy buffers created outside the ResourceManager are freed by hand.
            if (ctx.legacy.model_ubo.buffer != null) c.vkDestroyBuffer(device, ctx.legacy.model_ubo.buffer, null);
            if (ctx.legacy.model_ubo.memory != null) c.vkFreeMemory(device, ctx.legacy.model_ubo.memory, null);

            if (ctx.legacy.dummy_instance_buffer.buffer != null) c.vkDestroyBuffer(device, ctx.legacy.dummy_instance_buffer.buffer, null);
            if (ctx.legacy.dummy_instance_buffer.memory != null) c.vkFreeMemory(device, ctx.legacy.dummy_instance_buffer.memory, null);

            for (ctx.ui.ui_vbos) |buf| {
                if (buf.buffer != null) c.vkDestroyBuffer(device, buf.buffer, null);
                if (buf.memory != null) c.vkFreeMemory(device, buf.memory, null);
            }
        }

        // Debug-shadow VBO only exists when the build option is set.
        if (comptime @import("build_options").debug_shadows) {
            if (ctx.debug_shadow.vbo.buffer != null) c.vkDestroyBuffer(device, ctx.debug_shadow.vbo.buffer, null);
            if (ctx.debug_shadow.vbo.memory != null) c.vkFreeMemory(device, ctx.debug_shadow.vbo.memory, null);
        }

        ctx.resources.destroyTexture(ctx.draw.dummy_texture);
        ctx.resources.destroyTexture(ctx.draw.dummy_texture_3d);
        ctx.resources.destroyTexture(ctx.draw.dummy_normal_texture);
        ctx.resources.destroyTexture(ctx.draw.dummy_roughness_texture);
        if (ctx.legacy.dummy_shadow_view != null) c.vkDestroyImageView(ctx.vulkan_device.vk_device, ctx.legacy.dummy_shadow_view, null);
        if (ctx.legacy.dummy_shadow_image != null) c.vkDestroyImage(ctx.vulkan_device.vk_device, ctx.legacy.dummy_shadow_image, null);
        if (ctx.legacy.dummy_shadow_memory != null) c.vkFreeMemory(ctx.vulkan_device.vk_device, ctx.legacy.dummy_shadow_memory, null);

        ctx.shadow_system.deinit(ctx.vulkan_device.vk_device);

        // Managers last: they own pools/queues the objects above came from.
        ctx.descriptors.deinit();
        ctx.swapchain.deinit();
        ctx.frames.deinit();
        ctx.resources.deinit();

        if (ctx.timing.query_pool != null) {
            c.vkDestroyQueryPool(ctx.vulkan_device.vk_device, ctx.timing.query_pool, null);
        }

        ctx.vulkan_device.deinit();
    }

    // The context struct itself was heap-allocated; free it unconditionally.
    ctx.allocator.destroy(ctx);
}
diff --git a/src/engine/graphics/vulkan/rhi_native_access.zig b/src/engine/graphics/vulkan/rhi_native_access.zig
new file mode 100644
index 00000000..1378ff8a
--- /dev/null
+++ b/src/engine/graphics/vulkan/rhi_native_access.zig
//! Raw-handle accessors: expose Vulkan objects as opaque u64 handles so
//! callers outside this backend can interoperate without including Vulkan types.

/// Returns the sky pipeline handle as an opaque u64.
pub fn getNativeSkyPipeline(ctx: anytype) u64 {
    return @intFromPtr(ctx.pipeline_manager.sky_pipeline);
}

/// Returns the sky pipeline layout handle as an opaque u64.
pub fn getNativeSkyPipelineLayout(ctx: anytype) u64 {
    return @intFromPtr(ctx.pipeline_manager.sky_pipeline_layout);
}

/// Returns the cloud pipeline handle as an opaque u64.
pub fn getNativeCloudPipeline(ctx: anytype) u64 {
    return @intFromPtr(ctx.pipeline_manager.cloud_pipeline);
}

/// Returns the cloud pipeline layout handle as an opaque u64.
pub fn getNativeCloudPipelineLayout(ctx: anytype) u64 {
    return @intFromPtr(ctx.pipeline_manager.cloud_pipeline_layout);
}

/// Returns the current frame's main descriptor set as an opaque u64.
pub fn getNativeMainDescriptorSet(ctx: anytype) u64 {
    return @intFromPtr(ctx.descriptors.descriptor_sets[ctx.frames.current_frame]);
}

/// Returns the current frame's command buffer as an opaque u64.
pub fn getNativeCommandBuffer(ctx: anytype) u64 {
    return @intFromPtr(ctx.frames.command_buffers[ctx.frames.current_frame]);
}

/// Returns the swapchain extent as { width, height }.
pub fn getNativeSwapchainExtent(ctx: anytype) [2]u32 {
    const extent = ctx.swapchain.getExtent();
    return .{ extent.width, extent.height };
}

/// Returns the VkDevice handle as an opaque u64.
pub fn getNativeDevice(ctx: anytype) u64 {
    return @intFromPtr(ctx.vulkan_device.vk_device);
}
diff --git a/src/engine/graphics/vulkan/rhi_pass_orchestration.zig
b/src/engine/graphics/vulkan/rhi_pass_orchestration.zig new file mode 100644 index 00000000..e11a1cc2 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_pass_orchestration.zig @@ -0,0 +1,400 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const post_process_system_pkg = @import("post_process_system.zig"); +const PostProcessPushConstants = post_process_system_pkg.PostProcessPushConstants; +const fxaa_system_pkg = @import("fxaa_system.zig"); +const FXAAPushConstants = fxaa_system_pkg.FXAAPushConstants; +const setup = @import("rhi_resource_setup.zig"); + +pub fn beginGPassInternal(ctx: anytype) void { + if (!ctx.frames.frame_in_progress or ctx.runtime.g_pass_active) return; + + if (ctx.render_pass_manager.g_render_pass == null or ctx.render_pass_manager.g_framebuffer == null or ctx.pipeline_manager.g_pipeline == null) { + std.log.warn("beginGPass: skipping - resources null (rp={}, fb={}, pipeline={})", .{ ctx.render_pass_manager.g_render_pass != null, ctx.render_pass_manager.g_framebuffer != null, ctx.pipeline_manager.g_pipeline != null }); + return; + } + + if (ctx.gpass.g_pass_extent.width != ctx.swapchain.getExtent().width or ctx.gpass.g_pass_extent.height != ctx.swapchain.getExtent().height) { + std.log.warn("beginGPass: size mismatch! 
G-pass={}x{}, swapchain={}x{} - recreating", .{ ctx.gpass.g_pass_extent.width, ctx.gpass.g_pass_extent.height, ctx.swapchain.getExtent().width, ctx.swapchain.getExtent().height }); + _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); + setup.createGPassResources(ctx) catch |err| { + std.log.err("Failed to recreate G-pass resources: {}", .{err}); + return; + }; + setup.createSSAOResources(ctx) catch |err| { + std.log.err("Failed to recreate SSAO resources: {}", .{err}); + return; + }; + } + + ensureNoRenderPassActiveInternal(ctx); + + ctx.runtime.g_pass_active = true; + const current_frame = ctx.frames.current_frame; + const command_buffer = ctx.frames.command_buffers[current_frame]; + + if (command_buffer == null or ctx.pipeline_manager.pipeline_layout == null) { + std.log.err("beginGPass: invalid command state (cb={}, layout={})", .{ command_buffer != null, ctx.pipeline_manager.pipeline_layout != null }); + return; + } + + var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); + render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + render_pass_info.renderPass = ctx.render_pass_manager.g_render_pass; + render_pass_info.framebuffer = ctx.render_pass_manager.g_framebuffer; + render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; + render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); + + var clear_values: [3]c.VkClearValue = undefined; + clear_values[0] = std.mem.zeroes(c.VkClearValue); + clear_values[0].color = .{ .float32 = .{ 0, 0, 0, 1 } }; + clear_values[1] = std.mem.zeroes(c.VkClearValue); + clear_values[1].color = .{ .float32 = .{ 0, 0, 0, 1 } }; + clear_values[2] = std.mem.zeroes(c.VkClearValue); + clear_values[2].depthStencil = .{ .depth = 0.0, .stencil = 0 }; + render_pass_info.clearValueCount = 3; + render_pass_info.pClearValues = &clear_values[0]; + + c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, 
ctx.pipeline_manager.g_pipeline); + + const viewport = c.VkViewport{ .x = 0, .y = 0, .width = @floatFromInt(ctx.swapchain.getExtent().width), .height = @floatFromInt(ctx.swapchain.getExtent().height), .minDepth = 0, .maxDepth = 1 }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = ctx.swapchain.getExtent() }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + const ds = ctx.descriptors.descriptor_sets[ctx.frames.current_frame]; + if (ds == null) std.log.err("CRITICAL: descriptor_set is NULL for frame {}", .{ctx.frames.current_frame}); + + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.pipeline_layout, 0, 1, &ds, 0, null); +} + +pub fn endGPassInternal(ctx: anytype) void { + if (!ctx.runtime.g_pass_active) return; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdEndRenderPass(command_buffer); + ctx.runtime.g_pass_active = false; +} + +pub fn beginFXAAPassInternal(ctx: anytype) void { + if (!ctx.fxaa.enabled) return; + if (ctx.fxaa.pass_active) return; + if (ctx.fxaa.pipeline == null) return; + if (ctx.fxaa.render_pass == null) return; + + const image_index = ctx.frames.current_image_index; + if (image_index >= ctx.fxaa.framebuffers.items.len) return; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + const extent = ctx.swapchain.getExtent(); + + var clear_value = std.mem.zeroes(c.VkClearValue); + clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; + + var rp_begin = std.mem.zeroes(c.VkRenderPassBeginInfo); + rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + rp_begin.renderPass = ctx.fxaa.render_pass; + rp_begin.framebuffer = ctx.fxaa.framebuffers.items[image_index]; + rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; + rp_begin.clearValueCount = 1; + rp_begin.pClearValues = &clear_value; + + 
c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); + + const viewport = c.VkViewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(extent.width), + .height = @floatFromInt(extent.height), + .minDepth = 0.0, + .maxDepth = 1.0, + }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.fxaa.pipeline); + + const frame = ctx.frames.current_frame; + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.fxaa.pipeline_layout, 0, 1, &ctx.fxaa.descriptor_sets[frame], 0, null); + + const push = FXAAPushConstants{ + .texel_size = .{ 1.0 / @as(f32, @floatFromInt(extent.width)), 1.0 / @as(f32, @floatFromInt(extent.height)) }, + .fxaa_span_max = 8.0, + .fxaa_reduce_mul = 1.0 / 8.0, + }; + c.vkCmdPushConstants(command_buffer, ctx.fxaa.pipeline_layout, c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(FXAAPushConstants), &push); + + c.vkCmdDraw(command_buffer, 3, 1, 0, 0); + ctx.runtime.draw_call_count += 1; + + ctx.runtime.fxaa_ran_this_frame = true; + ctx.fxaa.pass_active = true; +} + +pub fn beginFXAAPassForUI(ctx: anytype) void { + if (!ctx.frames.frame_in_progress) return; + if (ctx.fxaa.pass_active) return; + if (ctx.render_pass_manager.ui_swapchain_render_pass == null) return; + if (ctx.render_pass_manager.ui_swapchain_framebuffers.items.len == 0) return; + + const image_index = ctx.frames.current_image_index; + if (image_index >= ctx.render_pass_manager.ui_swapchain_framebuffers.items.len) return; + + ensureNoRenderPassActiveInternal(ctx); + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + const extent = ctx.swapchain.getExtent(); + + var clear_value = std.mem.zeroes(c.VkClearValue); + clear_value.color.float32 = .{ 0.0, 0.0, 0.0, 1.0 }; + + var rp_begin = 
std.mem.zeroes(c.VkRenderPassBeginInfo); + rp_begin.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + rp_begin.renderPass = ctx.render_pass_manager.ui_swapchain_render_pass.?; + rp_begin.framebuffer = ctx.render_pass_manager.ui_swapchain_framebuffers.items[image_index]; + rp_begin.renderArea = .{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; + rp_begin.clearValueCount = 1; + rp_begin.pClearValues = &clear_value; + + c.vkCmdBeginRenderPass(command_buffer, &rp_begin, c.VK_SUBPASS_CONTENTS_INLINE); + + const viewport = c.VkViewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(extent.width), + .height = @floatFromInt(extent.height), + .minDepth = 0.0, + .maxDepth = 1.0, + }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + ctx.fxaa.pass_active = true; +} + +pub fn endFXAAPassInternal(ctx: anytype) void { + if (!ctx.fxaa.pass_active) return; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdEndRenderPass(command_buffer); + + ctx.fxaa.pass_active = false; +} + +pub fn beginMainPassInternal(ctx: anytype) void { + if (!ctx.frames.frame_in_progress) return; + if (ctx.swapchain.getExtent().width == 0 or ctx.swapchain.getExtent().height == 0) return; + + if (ctx.render_pass_manager.hdr_render_pass == null) { + ctx.render_pass_manager.createMainRenderPass(ctx.vulkan_device.vk_device, ctx.swapchain.getExtent(), ctx.options.msaa_samples) catch |err| { + std.log.err("beginMainPass: failed to recreate render pass: {}", .{err}); + return; + }; + } + if (ctx.render_pass_manager.main_framebuffer == null) { + setup.createMainFramebuffers(ctx) catch |err| { + std.log.err("beginMainPass: failed to recreate framebuffer: {}", .{err}); + return; + }; + } + if (ctx.render_pass_manager.main_framebuffer == null) return; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; 
+ if (!ctx.runtime.main_pass_active) { + ensureNoRenderPassActiveInternal(ctx); + + if (ctx.hdr.hdr_image != null) { + var barrier = std.mem.zeroes(c.VkImageMemoryBarrier); + barrier.sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.newLayout = c.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + barrier.srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED; + barrier.image = ctx.hdr.hdr_image; + barrier.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 }; + barrier.srcAccessMask = c.VK_ACCESS_SHADER_READ_BIT; + barrier.dstAccessMask = c.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + + c.vkCmdPipelineBarrier(command_buffer, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, c.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, null, 0, null, 1, &barrier); + } + + ctx.draw.terrain_pipeline_bound = false; + + var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); + render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + render_pass_info.renderPass = ctx.render_pass_manager.hdr_render_pass; + render_pass_info.framebuffer = ctx.render_pass_manager.main_framebuffer; + render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; + render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); + + var clear_values: [3]c.VkClearValue = undefined; + clear_values[0] = std.mem.zeroes(c.VkClearValue); + clear_values[0].color = .{ .float32 = ctx.runtime.clear_color }; + clear_values[1] = std.mem.zeroes(c.VkClearValue); + clear_values[1].depthStencil = .{ .depth = 0.0, .stencil = 0 }; + + if (ctx.options.msaa_samples > 1) { + clear_values[2] = std.mem.zeroes(c.VkClearValue); + clear_values[2].color = .{ .float32 = ctx.runtime.clear_color }; + render_pass_info.clearValueCount = 3; + } else { + render_pass_info.clearValueCount = 2; + } + render_pass_info.pClearValues = 
&clear_values[0]; + + c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); + ctx.runtime.main_pass_active = true; + ctx.draw.lod_mode = false; + } + + var viewport = std.mem.zeroes(c.VkViewport); + viewport.x = 0.0; + viewport.y = 0.0; + viewport.width = @floatFromInt(ctx.swapchain.getExtent().width); + viewport.height = @floatFromInt(ctx.swapchain.getExtent().height); + viewport.minDepth = 0.0; + viewport.maxDepth = 1.0; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + var scissor = std.mem.zeroes(c.VkRect2D); + scissor.offset = .{ .x = 0, .y = 0 }; + scissor.extent = ctx.swapchain.getExtent(); + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +pub fn endMainPassInternal(ctx: anytype) void { + if (!ctx.runtime.main_pass_active) return; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdEndRenderPass(command_buffer); + ctx.runtime.main_pass_active = false; +} + +pub fn beginPostProcessPassInternal(ctx: anytype) void { + if (!ctx.frames.frame_in_progress) return; + if (ctx.render_pass_manager.post_process_framebuffers.items.len == 0) return; + if (ctx.frames.current_image_index >= ctx.render_pass_manager.post_process_framebuffers.items.len) return; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + if (!ctx.post_process.pass_active) { + ensureNoRenderPassActiveInternal(ctx); + + const use_fxaa_output = ctx.fxaa.enabled and ctx.fxaa.post_process_to_fxaa_render_pass != null and ctx.fxaa.post_process_to_fxaa_framebuffer != null; + + var render_pass_info = std.mem.zeroes(c.VkRenderPassBeginInfo); + render_pass_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + + if (use_fxaa_output) { + render_pass_info.renderPass = ctx.fxaa.post_process_to_fxaa_render_pass; + render_pass_info.framebuffer = ctx.fxaa.post_process_to_fxaa_framebuffer; + } else { + render_pass_info.renderPass = ctx.render_pass_manager.post_process_render_pass; + 
render_pass_info.framebuffer = ctx.render_pass_manager.post_process_framebuffers.items[ctx.frames.current_image_index]; + } + + render_pass_info.renderArea.offset = .{ .x = 0, .y = 0 }; + render_pass_info.renderArea.extent = ctx.swapchain.getExtent(); + + var clear_value = std.mem.zeroes(c.VkClearValue); + clear_value.color = .{ .float32 = .{ 0, 0, 0, 1 } }; + render_pass_info.clearValueCount = 1; + render_pass_info.pClearValues = &clear_value; + + c.vkCmdBeginRenderPass(command_buffer, &render_pass_info, c.VK_SUBPASS_CONTENTS_INLINE); + ctx.post_process.pass_active = true; + ctx.runtime.post_process_ran_this_frame = true; + + if (ctx.post_process.pipeline == null) { + std.log.err("Post-process pipeline is null, skipping draw", .{}); + return; + } + + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.post_process.pipeline); + + const pp_ds = ctx.post_process.descriptor_sets[ctx.frames.current_frame]; + if (pp_ds == null) { + std.log.err("Post-process descriptor set is null for frame {}", .{ctx.frames.current_frame}); + return; + } + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.post_process.pipeline_layout, 0, 1, &pp_ds, 0, null); + + const push = PostProcessPushConstants{ + .bloom_enabled = if (ctx.bloom.enabled) 1.0 else 0.0, + .bloom_intensity = ctx.bloom.intensity, + .vignette_intensity = if (ctx.post_process_state.vignette_enabled) ctx.post_process_state.vignette_intensity else 0.0, + .film_grain_intensity = if (ctx.post_process_state.film_grain_enabled) ctx.post_process_state.film_grain_intensity else 0.0, + .color_grading_enabled = if (ctx.post_process_state.color_grading_enabled) 1.0 else 0.0, + .color_grading_intensity = ctx.post_process_state.color_grading_intensity, + }; + c.vkCmdPushConstants(command_buffer, ctx.post_process.pipeline_layout, c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(PostProcessPushConstants), &push); + + var viewport = std.mem.zeroes(c.VkViewport); + viewport.x = 0.0; + 
viewport.y = 0.0; + viewport.width = @floatFromInt(ctx.swapchain.getExtent().width); + viewport.height = @floatFromInt(ctx.swapchain.getExtent().height); + viewport.minDepth = 0.0; + viewport.maxDepth = 1.0; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + var scissor = std.mem.zeroes(c.VkRect2D); + scissor.offset = .{ .x = 0, .y = 0 }; + scissor.extent = ctx.swapchain.getExtent(); + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); + } +} + +pub fn endPostProcessPassInternal(ctx: anytype) void { + if (!ctx.post_process.pass_active) return; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdEndRenderPass(command_buffer); + ctx.post_process.pass_active = false; +} + +pub fn ensureNoRenderPassActiveInternal(ctx: anytype) void { + if (ctx.runtime.main_pass_active) endMainPassInternal(ctx); + if (ctx.shadow_system.pass_active) { + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + ctx.shadow_system.endPass(command_buffer); + } + if (ctx.runtime.g_pass_active) endGPassInternal(ctx); + if (ctx.post_process.pass_active) endPostProcessPassInternal(ctx); +} + +pub fn endFrame(ctx: anytype) void { + if (!ctx.frames.frame_in_progress) return; + + if (ctx.runtime.main_pass_active) endMainPassInternal(ctx); + if (ctx.shadow_system.pass_active) { + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + ctx.shadow_system.endPass(command_buffer); + } + + if (!ctx.runtime.post_process_ran_this_frame and ctx.render_pass_manager.post_process_framebuffers.items.len > 0 and ctx.frames.current_image_index < ctx.render_pass_manager.post_process_framebuffers.items.len) { + beginPostProcessPassInternal(ctx); + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdDraw(command_buffer, 3, 1, 0, 0); + ctx.runtime.draw_call_count += 1; + } + if (ctx.post_process.pass_active) endPostProcessPassInternal(ctx); + + if (ctx.fxaa.enabled and 
ctx.runtime.post_process_ran_this_frame and !ctx.runtime.fxaa_ran_this_frame) { + beginFXAAPassInternal(ctx); + } + if (ctx.fxaa.pass_active) endFXAAPassInternal(ctx); + + const transfer_cb = ctx.resources.getTransferCommandBuffer(); + + ctx.frames.endFrame(&ctx.swapchain, transfer_cb) catch |err| { + std.log.err("endFrame failed: {}", .{err}); + }; + + if (transfer_cb != null) { + ctx.resources.resetTransferState(); + } + + ctx.runtime.frame_index += 1; +} diff --git a/src/engine/graphics/vulkan/rhi_render_state.zig b/src/engine/graphics/vulkan/rhi_render_state.zig new file mode 100644 index 00000000..d6db6b3a --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_render_state.zig @@ -0,0 +1,152 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Mat4 = @import("../../math/mat4.zig").Mat4; +const Vec3 = @import("../../math/vec3.zig").Vec3; +const bindings = @import("descriptor_bindings.zig"); +const pass_orchestration = @import("rhi_pass_orchestration.zig"); + +const GlobalUniforms = extern struct { + view_proj: Mat4, + view_proj_prev: Mat4, + cam_pos: [4]f32, + sun_dir: [4]f32, + sun_color: [4]f32, + fog_color: [4]f32, + cloud_wind_offset: [4]f32, + params: [4]f32, + lighting: [4]f32, + cloud_params: [4]f32, + pbr_params: [4]f32, + volumetric_params: [4]f32, + viewport_size: [4]f32, + lpv_params: [4]f32, + lpv_origin: [4]f32, +}; + +const CloudPushConstants = extern struct { + view_proj: [4][4]f32, + camera_pos: [4]f32, + cloud_params: [4]f32, + sun_params: [4]f32, + fog_params: [4]f32, +}; + +pub fn updateGlobalUniforms(ctx: anytype, view_proj: Mat4, cam_pos: Vec3, sun_dir: Vec3, sun_color: Vec3, time_val: f32, fog_color: Vec3, fog_density: f32, fog_enabled: bool, sun_intensity: f32, ambient: f32, use_texture: bool, cloud_params: rhi.CloudParams) !void { + const global_uniforms = GlobalUniforms{ + .view_proj = view_proj, + .view_proj_prev = ctx.velocity.view_proj_prev, + .cam_pos = .{ cam_pos.x, 
cam_pos.y, cam_pos.z, 1.0 }, + .sun_dir = .{ sun_dir.x, sun_dir.y, sun_dir.z, 0.0 }, + .sun_color = .{ sun_color.x, sun_color.y, sun_color.z, 1.0 }, + .fog_color = .{ fog_color.x, fog_color.y, fog_color.z, 1.0 }, + .cloud_wind_offset = .{ cloud_params.wind_offset_x, cloud_params.wind_offset_z, cloud_params.cloud_scale, cloud_params.cloud_coverage }, + .params = .{ time_val, fog_density, if (fog_enabled) 1.0 else 0.0, sun_intensity }, + .lighting = .{ ambient, if (use_texture) 1.0 else 0.0, if (cloud_params.pbr_enabled) 1.0 else 0.0, cloud_params.shadow.strength }, + .cloud_params = .{ cloud_params.cloud_height, @floatFromInt(cloud_params.shadow.pcf_samples), if (cloud_params.shadow.cascade_blend) 1.0 else 0.0, if (cloud_params.cloud_shadows) 1.0 else 0.0 }, + .pbr_params = .{ @floatFromInt(cloud_params.pbr_quality), cloud_params.exposure, cloud_params.saturation, if (cloud_params.ssao_enabled) 1.0 else 0.0 }, + .volumetric_params = .{ if (cloud_params.volumetric_enabled) 1.0 else 0.0, cloud_params.volumetric_density, @floatFromInt(cloud_params.volumetric_steps), cloud_params.volumetric_scattering }, + .viewport_size = .{ @floatFromInt(ctx.swapchain.swapchain.extent.width), @floatFromInt(ctx.swapchain.swapchain.extent.height), if (ctx.options.debug_shadows_active) 1.0 else 0.0, 0.0 }, + .lpv_params = .{ if (cloud_params.lpv_enabled) 1.0 else 0.0, cloud_params.lpv_intensity, cloud_params.lpv_cell_size, @floatFromInt(cloud_params.lpv_grid_size) }, + .lpv_origin = .{ cloud_params.lpv_origin.x, cloud_params.lpv_origin.y, cloud_params.lpv_origin.z, 0.0 }, + }; + + try ctx.descriptors.updateGlobalUniforms(ctx.frames.current_frame, &global_uniforms); + ctx.velocity.view_proj_prev = view_proj; +} + +pub fn setModelMatrix(ctx: anytype, model: Mat4, color: Vec3, mask_radius: f32) void { + ctx.draw.current_model = model; + ctx.draw.current_color = .{ color.x, color.y, color.z }; + ctx.draw.current_mask_radius = mask_radius; +} + +pub fn setInstanceBuffer(ctx: anytype, handle: 
rhi.BufferHandle) void {
    if (!ctx.frames.frame_in_progress) return;
    ctx.draw.pending_instance_buffer = handle;
    ctx.draw.lod_mode = false;
    applyPendingDescriptorUpdates(ctx, ctx.frames.current_frame);
}

/// Selects `handle` as the LOD instance SSBO for subsequent draws and puts
/// draw submission into LOD mode. No-op when no frame is in progress.
pub fn setLODInstanceBuffer(ctx: anytype, handle: rhi.BufferHandle) void {
    if (!ctx.frames.frame_in_progress) return;
    ctx.draw.pending_lod_instance_buffer = handle;
    ctx.draw.lod_mode = true;
    applyPendingDescriptorUpdates(ctx, ctx.frames.current_frame);
}

/// Toggles UI selection mode.
pub fn setSelectionMode(ctx: anytype, enabled: bool) void {
    ctx.ui.selection_mode = enabled;
}

/// Rebinds the per-frame instance SSBO descriptors when a new buffer handle
/// is pending. The regular and the LOD descriptor set use identical write
/// logic, shared through `writeInstanceSsbo` (previously duplicated inline).
/// A handle of 0 means "nothing pending"; a handle that no longer resolves to
/// a live buffer is skipped and remains pending.
pub fn applyPendingDescriptorUpdates(ctx: anytype, frame_index: usize) void {
    if (ctx.draw.pending_instance_buffer != 0 and
        ctx.draw.bound_instance_buffer[frame_index] != ctx.draw.pending_instance_buffer)
    {
        if (writeInstanceSsbo(ctx, ctx.descriptors.descriptor_sets[frame_index], ctx.draw.pending_instance_buffer)) {
            ctx.draw.bound_instance_buffer[frame_index] = ctx.draw.pending_instance_buffer;
        }
    }

    if (ctx.draw.pending_lod_instance_buffer != 0 and
        ctx.draw.bound_lod_instance_buffer[frame_index] != ctx.draw.pending_lod_instance_buffer)
    {
        if (writeInstanceSsbo(ctx, ctx.descriptors.lod_descriptor_sets[frame_index], ctx.draw.pending_lod_instance_buffer)) {
            ctx.draw.bound_lod_instance_buffer[frame_index] = ctx.draw.pending_lod_instance_buffer;
        }
    }
}

/// Writes the VkBuffer behind `buffer_handle` into the INSTANCE_SSBO binding
/// of `dst_set`. Returns false (leaving the descriptor set untouched) when
/// the handle does not resolve to a live buffer in the resource table.
fn writeInstanceSsbo(ctx: anytype, dst_set: c.VkDescriptorSet, buffer_handle: rhi.BufferHandle) bool {
    const buf = ctx.resources.buffers.get(buffer_handle) orelse return false;

    var buffer_info = c.VkDescriptorBufferInfo{
        .buffer = buf.buffer,
        .offset = 0,
        .range = buf.size,
    };

    var write = std.mem.zeroes(c.VkWriteDescriptorSet);
    write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = dst_set;
    write.dstBinding = bindings.INSTANCE_SSBO;
    write.descriptorType = c.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    write.descriptorCount = 1;
    write.pBufferInfo = &buffer_info;

    c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write, 0, null);
    return true;
}

/// Binds the cloud pipeline inside the main pass (starting the pass first if
/// necessary) and pushes the per-frame cloud constants. Returns silently when
/// no frame is in progress, the main pass could not start, or the cloud
/// pipeline was never created.
pub fn beginCloudPass(ctx: anytype, params: rhi.CloudParams) void {
    if (!ctx.frames.frame_in_progress) return;

    if (!ctx.runtime.main_pass_active) pass_orchestration.beginMainPassInternal(ctx);
    if (!ctx.runtime.main_pass_active) return;

    if (ctx.pipeline_manager.cloud_pipeline == null) return;

    const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame];

    c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.pipeline_manager.cloud_pipeline);
    // A different pipeline is now bound, so the cached terrain binding is stale.
    ctx.draw.terrain_pipeline_bound = false;

    const pc = CloudPushConstants{
        .view_proj = params.view_proj.data,
        .camera_pos = .{ params.cam_pos.x, params.cam_pos.y, params.cam_pos.z, params.cloud_height },
        .cloud_params = .{ params.cloud_coverage, params.cloud_scale, params.wind_offset_x, params.wind_offset_z },
        .sun_params = .{ params.sun_dir.x, params.sun_dir.y, params.sun_dir.z, params.sun_intensity },
        .fog_params = .{ params.fog_color.x, params.fog_color.y, params.fog_color.z, params.fog_density },
    };

    c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.cloud_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT | c.VK_SHADER_STAGE_FRAGMENT_BIT, 0, @sizeOf(CloudPushConstants), &pc);
}
diff --git a/src/engine/graphics/vulkan/rhi_resource_lifecycle.zig b/src/engine/graphics/vulkan/rhi_resource_lifecycle.zig
new file mode 100644
index 00000000..66624a0d
--- /dev/null
+++ b/src/engine/graphics/vulkan/rhi_resource_lifecycle.zig
@@ -0,0 +1,254 @@
const std = @import("std");
const c = 
@import("../../../c.zig").c;
const rhi = @import("../rhi.zig");
const Utils = @import("utils.zig");

/// Destroys the HDR color target and its MSAA companion (views, images,
/// memory). Every handle is nulled after destruction, so the function is safe
/// to call repeatedly or on partially-created state.
pub fn destroyHDRResources(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;
    if (ctx.hdr.hdr_view != null) {
        c.vkDestroyImageView(vk, ctx.hdr.hdr_view, null);
        ctx.hdr.hdr_view = null;
    }
    if (ctx.hdr.hdr_image != null) {
        c.vkDestroyImage(vk, ctx.hdr.hdr_image, null);
        ctx.hdr.hdr_image = null;
    }
    if (ctx.hdr.hdr_memory != null) {
        c.vkFreeMemory(vk, ctx.hdr.hdr_memory, null);
        ctx.hdr.hdr_memory = null;
    }
    if (ctx.hdr.hdr_msaa_view != null) {
        c.vkDestroyImageView(vk, ctx.hdr.hdr_msaa_view, null);
        ctx.hdr.hdr_msaa_view = null;
    }
    if (ctx.hdr.hdr_msaa_image != null) {
        c.vkDestroyImage(vk, ctx.hdr.hdr_msaa_image, null);
        ctx.hdr.hdr_msaa_image = null;
    }
    if (ctx.hdr.hdr_msaa_memory != null) {
        c.vkFreeMemory(vk, ctx.hdr.hdr_msaa_memory, null);
        ctx.hdr.hdr_msaa_memory = null;
    }
}

/// Tears down the post-process framebuffers, the post-process subsystem, its
/// render pass, and the swapchain UI resources that were built on top of it.
pub fn destroyPostProcessResources(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;

    for (ctx.render_pass_manager.post_process_framebuffers.items) |fb| {
        c.vkDestroyFramebuffer(vk, fb, null);
    }
    ctx.render_pass_manager.post_process_framebuffers.deinit(ctx.allocator);
    ctx.render_pass_manager.post_process_framebuffers = .empty;

    ctx.post_process.deinit(vk, ctx.descriptors.descriptor_pool);

    if (ctx.render_pass_manager.post_process_render_pass != null) {
        c.vkDestroyRenderPass(vk, ctx.render_pass_manager.post_process_render_pass, null);
        ctx.render_pass_manager.post_process_render_pass = null;
    }

    destroySwapchainUIResources(ctx);
}

/// Destroys everything belonging to the G-pass: velocity buffer, SSAO system,
/// G-pipeline, framebuffer, render pass, and the normal/depth attachments.
pub fn destroyGPassResources(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;
    destroyVelocityResources(ctx);
    ctx.ssao_system.deinit(vk, ctx.allocator, ctx.descriptors.descriptor_pool);
    if (ctx.pipeline_manager.g_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.pipeline_manager.g_pipeline, null);
        ctx.pipeline_manager.g_pipeline = null;
    }
    if (ctx.render_pass_manager.g_framebuffer != null) {
        c.vkDestroyFramebuffer(vk, ctx.render_pass_manager.g_framebuffer, null);
        ctx.render_pass_manager.g_framebuffer = null;
    }
    if (ctx.render_pass_manager.g_render_pass != null) {
        c.vkDestroyRenderPass(vk, ctx.render_pass_manager.g_render_pass, null);
        ctx.render_pass_manager.g_render_pass = null;
    }
    if (ctx.gpass.g_normal_view != null) {
        c.vkDestroyImageView(vk, ctx.gpass.g_normal_view, null);
        ctx.gpass.g_normal_view = null;
    }
    if (ctx.gpass.g_normal_image != null) {
        c.vkDestroyImage(vk, ctx.gpass.g_normal_image, null);
        ctx.gpass.g_normal_image = null;
    }
    if (ctx.gpass.g_normal_memory != null) {
        c.vkFreeMemory(vk, ctx.gpass.g_normal_memory, null);
        ctx.gpass.g_normal_memory = null;
    }
    if (ctx.gpass.g_depth_view != null) {
        c.vkDestroyImageView(vk, ctx.gpass.g_depth_view, null);
        ctx.gpass.g_depth_view = null;
    }
    if (ctx.gpass.g_depth_image != null) {
        c.vkDestroyImage(vk, ctx.gpass.g_depth_image, null);
        ctx.gpass.g_depth_image = null;
    }
    if (ctx.gpass.g_depth_memory != null) {
        c.vkFreeMemory(vk, ctx.gpass.g_depth_memory, null);
        ctx.gpass.g_depth_memory = null;
    }
}

/// Destroys the two UI-on-swapchain pipelines. No-op before device creation.
pub fn destroySwapchainUIPipelines(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    if (ctx.pipeline_manager.ui_swapchain_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.pipeline_manager.ui_swapchain_pipeline, null);
        ctx.pipeline_manager.ui_swapchain_pipeline = null;
    }
    if (ctx.pipeline_manager.ui_swapchain_tex_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.pipeline_manager.ui_swapchain_tex_pipeline, null);
        ctx.pipeline_manager.ui_swapchain_tex_pipeline = null;
    }
}

/// Destroys the UI-on-swapchain framebuffers and render pass.
/// No-op before device creation.
pub fn destroySwapchainUIResources(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    for (ctx.render_pass_manager.ui_swapchain_framebuffers.items) |fb| {
        c.vkDestroyFramebuffer(vk, fb, null);
    }
    ctx.render_pass_manager.ui_swapchain_framebuffers.deinit(ctx.allocator);
    ctx.render_pass_manager.ui_swapchain_framebuffers = .empty;

    if (ctx.render_pass_manager.ui_swapchain_render_pass) |rp| {
        c.vkDestroyRenderPass(vk, rp, null);
        ctx.render_pass_manager.ui_swapchain_render_pass = null;
    }
}

/// Destroys FXAA resources together with the UI pipelines that render into
/// the same swapchain targets.
pub fn destroyFXAAResources(ctx: anytype) void {
    destroySwapchainUIPipelines(ctx);
    ctx.fxaa.deinit(ctx.vulkan_device.vk_device, ctx.allocator, ctx.descriptors.descriptor_pool);
}

/// Delegates bloom teardown to the bloom subsystem.
pub fn destroyBloomResources(ctx: anytype) void {
    ctx.bloom.deinit(ctx.vulkan_device.vk_device, ctx.allocator, ctx.descriptors.descriptor_pool);
}

/// Destroys the velocity buffer (view, image, memory), nulling each handle.
/// No-op before device creation.
pub fn destroyVelocityResources(ctx: anytype) void {
    const vk = ctx.vulkan_device.vk_device;
    if (vk == null) return;

    if (ctx.velocity.velocity_view != null) {
        c.vkDestroyImageView(vk, ctx.velocity.velocity_view, null);
        ctx.velocity.velocity_view = null;
    }
    if (ctx.velocity.velocity_image != null) {
        c.vkDestroyImage(vk, ctx.velocity.velocity_image, null);
        ctx.velocity.velocity_image = null;
    }
    if (ctx.velocity.velocity_memory != null) {
        c.vkFreeMemory(vk, ctx.velocity.velocity_memory, null);
        ctx.velocity.velocity_memory = null;
    }
}

/// Records and submits a one-shot command buffer transitioning `images` from
/// UNDEFINED to SHADER_READ_ONLY_OPTIMAL, then blocks until the queue is
/// idle — only appropriate during (re)initialization, not per-frame.
/// oldLayout == UNDEFINED means any previous contents are discarded.
pub fn transitionImagesToShaderRead(ctx: anytype, images: []const c.VkImage, is_depth: bool) !void {
    // Nothing to transition; avoid submitting an empty command buffer.
    if (images.len == 0) return;

    // The barrier scratch array is fixed-size. Previously `images.len > 16`
    // silently wrote out of bounds; make the limit explicit.
    const max_batch = 16;
    std.debug.assert(images.len <= max_batch);

    const aspect_mask: c.VkImageAspectFlags = if (is_depth) c.VK_IMAGE_ASPECT_DEPTH_BIT else c.VK_IMAGE_ASPECT_COLOR_BIT;

    var alloc_info = std.mem.zeroes(c.VkCommandBufferAllocateInfo);
    alloc_info.sType = c.VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    alloc_info.level = c.VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    alloc_info.commandPool = ctx.frames.command_pool;
    alloc_info.commandBufferCount = 1;

    var cmd: c.VkCommandBuffer = null;
    try Utils.checkVk(c.vkAllocateCommandBuffers(ctx.vulkan_device.vk_device, &alloc_info, &cmd));

    var begin_info = std.mem.zeroes(c.VkCommandBufferBeginInfo);
    begin_info.sType = c.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = c.VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    try Utils.checkVk(c.vkBeginCommandBuffer(cmd, &begin_info));

    var barriers: [max_batch]c.VkImageMemoryBarrier = undefined;
    for (images, 0..) |image, i| {
        barriers[i] = std.mem.zeroes(c.VkImageMemoryBarrier);
        barriers[i].sType = c.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barriers[i].oldLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
        barriers[i].newLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        barriers[i].srcQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED;
        barriers[i].dstQueueFamilyIndex = c.VK_QUEUE_FAMILY_IGNORED;
        barriers[i].image = image;
        barriers[i].subresourceRange = .{ .aspectMask = aspect_mask, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };
        barriers[i].srcAccessMask = 0;
        barriers[i].dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT;
    }

    c.vkCmdPipelineBarrier(cmd, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, null, 0, null, @intCast(images.len), &barriers[0]);

    try Utils.checkVk(c.vkEndCommandBuffer(cmd));

    var submit_info = std.mem.zeroes(c.VkSubmitInfo);
    submit_info.sType = c.VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cmd;
    try ctx.vulkan_device.submitGuarded(submit_info, null);
    try Utils.checkVk(c.vkQueueWaitIdle(ctx.vulkan_device.queue));
    c.vkFreeCommandBuffers(ctx.vulkan_device.vk_device, ctx.frames.command_pool, 1, &cmd);
}

/// Maps an MSAA sample count (2/4/8) to the Vulkan flag; anything else falls
/// back to single-sampled.
fn getMSAASampleCountFlag(samples: u8) c.VkSampleCountFlagBits {
    return switch (samples) {
        2 => c.VK_SAMPLE_COUNT_2_BIT,
        4 => c.VK_SAMPLE_COUNT_4_BIT,
        8 => c.VK_SAMPLE_COUNT_8_BIT,
        else => c.VK_SAMPLE_COUNT_1_BIT,
    };
}

/// Creates the HDR (RGBA16F) resolve target and, when MSAA is enabled, a
/// transient multisampled companion image sharing the same create-info.
pub fn createHDRResources(ctx: anytype) !void {
    const extent = ctx.swapchain.getExtent();
    const format = c.VK_FORMAT_R16G16B16A16_SFLOAT;
    const sample_count = getMSAASampleCountFlag(ctx.options.msaa_samples);

    var image_info = std.mem.zeroes(c.VkImageCreateInfo);
    image_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_info.imageType = c.VK_IMAGE_TYPE_2D;
    image_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
    image_info.mipLevels = 1;
    image_info.arrayLayers = 1;
    image_info.format = format;
    image_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
    image_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
    image_info.usage = c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
    // The resolve target is always single-sampled; only the companion below
    // carries the MSAA sample count.
    image_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
    image_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;

    try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &image_info, null, &ctx.hdr.hdr_image));

    var mem_reqs: c.VkMemoryRequirements = undefined;
    c.vkGetImageMemoryRequirements(ctx.vulkan_device.vk_device, ctx.hdr.hdr_image, &mem_reqs);
    var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
    alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = mem_reqs.size;
    alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    try Utils.checkVk(c.vkAllocateMemory(ctx.vulkan_device.vk_device, &alloc_info, null, &ctx.hdr.hdr_memory));
    try Utils.checkVk(c.vkBindImageMemory(ctx.vulkan_device.vk_device, ctx.hdr.hdr_image, ctx.hdr.hdr_memory, 0));

    var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
    view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_info.image = ctx.hdr.hdr_image;
    view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
    view_info.format = format;
    view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };
    try Utils.checkVk(c.vkCreateImageView(ctx.vulkan_device.vk_device, &view_info, null, &ctx.hdr.hdr_view));

    if (ctx.options.msaa_samples > 1) {
        // Reuse the same create-info, upgraded to multisampled + transient
        // (never sampled directly; resolved into the image above).
        image_info.samples = sample_count;
        image_info.usage = c.VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &image_info, null, &ctx.hdr.hdr_msaa_image));
        c.vkGetImageMemoryRequirements(ctx.vulkan_device.vk_device, ctx.hdr.hdr_msaa_image, &mem_reqs);
        alloc_info.allocationSize = mem_reqs.size;
        alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
        try Utils.checkVk(c.vkAllocateMemory(ctx.vulkan_device.vk_device, &alloc_info, null, &ctx.hdr.hdr_msaa_memory));
        try Utils.checkVk(c.vkBindImageMemory(ctx.vulkan_device.vk_device, ctx.hdr.hdr_msaa_image, ctx.hdr.hdr_msaa_memory, 0));

        view_info.image = ctx.hdr.hdr_msaa_image;
        try Utils.checkVk(c.vkCreateImageView(ctx.vulkan_device.vk_device, &view_info, null, &ctx.hdr.hdr_msaa_view));
    }
}
diff --git a/src/engine/graphics/vulkan/rhi_resource_setup.zig b/src/engine/graphics/vulkan/rhi_resource_setup.zig
new file mode 100644
index 00000000..961057bd
--- /dev/null
+++ b/src/engine/graphics/vulkan/rhi_resource_setup.zig
@@ -0,0 +1,512 @@
const std = @import("std");
const c = @import("../../../c.zig").c;
const rhi = @import("../rhi.zig");
const Utils = @import("utils.zig");
const shader_registry = @import("shader_registry.zig");
const build_options = @import("build_options");
const bindings = @import("descriptor_bindings.zig");
const lifecycle = @import("rhi_resource_lifecycle.zig");

const DEPTH_FORMAT = c.VK_FORMAT_D32_SFLOAT;
const MAX_FRAMES_IN_FLIGHT = rhi.MAX_FRAMES_IN_FLIGHT;

/// Recreates the UI-on-swapchain render pass and framebuffers, destroying any
/// previous instances first (and again on failure via errdefer).
pub fn createSwapchainUIResources(ctx: anytype) !void {
    const vk = ctx.vulkan_device.vk_device;

    lifecycle.destroySwapchainUIResources(ctx);
    errdefer lifecycle.destroySwapchainUIResources(ctx);

    try ctx.render_pass_manager.createUISwapchainRenderPass(vk, ctx.swapchain.getImageFormat());
    try ctx.render_pass_manager.createUISwapchainFramebuffers(vk, 
ctx.allocator, ctx.swapchain.getExtent(), ctx.swapchain.getImageViews());
}

/// Builds the complete cascaded shadow-map stack: depth-only render pass,
/// layered depth image with per-cascade views/framebuffers, comparison and
/// regular samplers, and the shadow graphics pipeline. Safe to call again for
/// the pipeline (the old pipeline is destroyed only after the replacement was
/// created successfully).
pub fn createShadowResources(ctx: anytype) !void {
    const vk = ctx.vulkan_device.vk_device;
    const shadow_res = ctx.shadow_runtime.shadow_resolution;

    // Depth-only pass; finalLayout SHADER_READ_ONLY lets the main pass sample
    // the map without an explicit barrier afterwards.
    var shadow_depth_desc = std.mem.zeroes(c.VkAttachmentDescription);
    shadow_depth_desc.format = DEPTH_FORMAT;
    shadow_depth_desc.samples = c.VK_SAMPLE_COUNT_1_BIT;
    shadow_depth_desc.loadOp = c.VK_ATTACHMENT_LOAD_OP_CLEAR;
    shadow_depth_desc.storeOp = c.VK_ATTACHMENT_STORE_OP_STORE;
    shadow_depth_desc.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
    shadow_depth_desc.finalLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    var shadow_depth_ref = c.VkAttachmentReference{ .attachment = 0, .layout = c.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };
    var shadow_subpass = std.mem.zeroes(c.VkSubpassDescription);
    shadow_subpass.pipelineBindPoint = c.VK_PIPELINE_BIND_POINT_GRAPHICS;
    shadow_subpass.pDepthStencilAttachment = &shadow_depth_ref;

    var shadow_rp_info = std.mem.zeroes(c.VkRenderPassCreateInfo);
    shadow_rp_info.sType = c.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    shadow_rp_info.attachmentCount = 1;
    shadow_rp_info.pAttachments = &shadow_depth_desc;
    shadow_rp_info.subpassCount = 1;
    shadow_rp_info.pSubpasses = &shadow_subpass;

    // External dependencies: previous reads finish before we write depth, and
    // our depth writes finish before the main pass samples the map.
    var shadow_dependencies = [_]c.VkSubpassDependency{
        .{ .srcSubpass = c.VK_SUBPASS_EXTERNAL, .dstSubpass = 0, .srcStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, .dstStageMask = c.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, .srcAccessMask = c.VK_ACCESS_SHADER_READ_BIT, .dstAccessMask = c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT },
        .{ .srcSubpass = 0, .dstSubpass = c.VK_SUBPASS_EXTERNAL, .srcStageMask = c.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, .dstStageMask = c.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, .srcAccessMask = c.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, .dstAccessMask = c.VK_ACCESS_SHADER_READ_BIT, .dependencyFlags = c.VK_DEPENDENCY_BY_REGION_BIT },
    };
    shadow_rp_info.dependencyCount = 2;
    shadow_rp_info.pDependencies = &shadow_dependencies;

    try Utils.checkVk(c.vkCreateRenderPass(ctx.vulkan_device.vk_device, &shadow_rp_info, null, &ctx.shadow_system.shadow_render_pass));

    ctx.shadow_system.shadow_extent = .{ .width = shadow_res, .height = shadow_res };

    // One 2D-array image holds all cascades as layers.
    var shadow_img_info = std.mem.zeroes(c.VkImageCreateInfo);
    shadow_img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    shadow_img_info.imageType = c.VK_IMAGE_TYPE_2D;
    shadow_img_info.extent = .{ .width = shadow_res, .height = shadow_res, .depth = 1 };
    shadow_img_info.mipLevels = 1;
    shadow_img_info.arrayLayers = rhi.SHADOW_CASCADE_COUNT;
    shadow_img_info.format = DEPTH_FORMAT;
    shadow_img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
    shadow_img_info.usage = c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
    shadow_img_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
    try Utils.checkVk(c.vkCreateImage(ctx.vulkan_device.vk_device, &shadow_img_info, null, &ctx.shadow_system.shadow_image));

    var mem_reqs: c.VkMemoryRequirements = undefined;
    c.vkGetImageMemoryRequirements(vk, ctx.shadow_system.shadow_image, &mem_reqs);
    var alloc_info = c.VkMemoryAllocateInfo{ .sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .allocationSize = mem_reqs.size, .memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) };
    try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, &ctx.shadow_system.shadow_image_memory));
    try Utils.checkVk(c.vkBindImageMemory(vk, ctx.shadow_system.shadow_image, ctx.shadow_system.shadow_image_memory, 0));

    // Array view spanning every cascade, for sampling in shaders.
    var array_view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
    array_view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    array_view_info.image = ctx.shadow_system.shadow_image;
    array_view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D_ARRAY;
    array_view_info.format = DEPTH_FORMAT;
    array_view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = rhi.SHADOW_CASCADE_COUNT };
    try Utils.checkVk(c.vkCreateImageView(vk, &array_view_info, null, &ctx.shadow_system.shadow_image_view));

    {
        // Comparison sampler for hardware PCF, plus a regular (non-compare)
        // copy for direct reads (e.g. debug visualization).
        var sampler_info = std.mem.zeroes(c.VkSamplerCreateInfo);
        sampler_info.sType = c.VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
        sampler_info.magFilter = c.VK_FILTER_LINEAR;
        sampler_info.minFilter = c.VK_FILTER_LINEAR;
        sampler_info.addressModeU = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        sampler_info.addressModeV = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        sampler_info.addressModeW = c.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        sampler_info.anisotropyEnable = c.VK_FALSE;
        sampler_info.maxAnisotropy = 1.0;
        sampler_info.borderColor = c.VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
        sampler_info.compareEnable = c.VK_TRUE;
        // GREATER_OR_EQUAL matches the depth compare op below — presumably a
        // reverse-Z depth convention; confirm against the main pipeline.
        sampler_info.compareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL;

        try Utils.checkVk(c.vkCreateSampler(vk, &sampler_info, null, &ctx.shadow_system.shadow_sampler));

        var regular_sampler_info = sampler_info;
        regular_sampler_info.compareEnable = c.VK_FALSE;
        regular_sampler_info.compareOp = c.VK_COMPARE_OP_ALWAYS;
        // Fixed: this argument was mangled to "®ular_sampler_info" (a
        // mis-encoded "&regular_sampler_info").
        try Utils.checkVk(c.vkCreateSampler(vk, &regular_sampler_info, null, &ctx.shadow_system.shadow_sampler_regular));
    }

    // Per-cascade single-layer views, external-texture registrations and
    // framebuffers.
    for (0..rhi.SHADOW_CASCADE_COUNT) |si| {
        var layer_view: c.VkImageView = null;
        var layer_view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
        layer_view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        layer_view_info.image = ctx.shadow_system.shadow_image;
        layer_view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
        layer_view_info.format = DEPTH_FORMAT;
        layer_view_info.subresourceRange = .{ .aspectMask = c.VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = @intCast(si), .layerCount = 1 };
        try Utils.checkVk(c.vkCreateImageView(vk, &layer_view_info, null, &layer_view));
        ctx.shadow_system.shadow_image_views[si] = layer_view;

        ctx.shadow_runtime.shadow_map_handles[si] = try ctx.resources.registerExternalTexture(shadow_res, shadow_res, .depth, layer_view, ctx.shadow_system.shadow_sampler_regular);

        var fb_info = std.mem.zeroes(c.VkFramebufferCreateInfo);
        fb_info.sType = c.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        fb_info.renderPass = ctx.shadow_system.shadow_render_pass;
        fb_info.attachmentCount = 1;
        fb_info.pAttachments = &ctx.shadow_system.shadow_image_views[si];
        fb_info.width = shadow_res;
        fb_info.height = shadow_res;
        fb_info.layers = 1;
        try Utils.checkVk(c.vkCreateFramebuffer(vk, &fb_info, null, &ctx.shadow_system.shadow_framebuffers[si]));
        ctx.shadow_system.shadow_image_layouts[si] = c.VK_IMAGE_LAYOUT_UNDEFINED;
    }

    // Load SPIR-V and build the shadow pipeline.
    const shadow_vert = try std.fs.cwd().readFileAlloc(shader_registry.SHADOW_VERT, ctx.allocator, @enumFromInt(1024 * 1024));
    defer ctx.allocator.free(shadow_vert);
    const shadow_frag = try std.fs.cwd().readFileAlloc(shader_registry.SHADOW_FRAG, ctx.allocator, @enumFromInt(1024 * 1024));
    defer ctx.allocator.free(shadow_frag);

    const shadow_vert_module = try Utils.createShaderModule(vk, shadow_vert);
    defer c.vkDestroyShaderModule(vk, shadow_vert_module, null);
    const shadow_frag_module = try Utils.createShaderModule(vk, shadow_frag);
    defer c.vkDestroyShaderModule(vk, shadow_frag_module, null);

    var shadow_stages = [_]c.VkPipelineShaderStageCreateInfo{
        .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_VERTEX_BIT, .module = shadow_vert_module, .pName = "main" },
        .{ .sType = c.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .stage = c.VK_SHADER_STAGE_FRAGMENT_BIT, .module = shadow_frag_module, .pName = "main" },
    };

    const shadow_binding = c.VkVertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(rhi.Vertex), .inputRate = c.VK_VERTEX_INPUT_RATE_VERTEX };
    var shadow_attrs: [2]c.VkVertexInputAttributeDescription = undefined;
    shadow_attrs[0] = .{ .binding = 0, .location = 0, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 0 };
    // Offset 24 — assumed to match the second vec3 inside rhi.Vertex; confirm
    // against the Vertex declaration.
    shadow_attrs[1] = .{ .binding = 0, .location = 1, .format = c.VK_FORMAT_R32G32B32_SFLOAT, .offset = 24 };

    var shadow_vertex_input = std.mem.zeroes(c.VkPipelineVertexInputStateCreateInfo);
    shadow_vertex_input.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    shadow_vertex_input.vertexBindingDescriptionCount = 1;
    shadow_vertex_input.pVertexBindingDescriptions = &shadow_binding;
    shadow_vertex_input.vertexAttributeDescriptionCount = 2;
    shadow_vertex_input.pVertexAttributeDescriptions = &shadow_attrs[0];

    var shadow_input_assembly = std.mem.zeroes(c.VkPipelineInputAssemblyStateCreateInfo);
    shadow_input_assembly.sType = c.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    shadow_input_assembly.topology = c.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

    var shadow_rasterizer = std.mem.zeroes(c.VkPipelineRasterizationStateCreateInfo);
    shadow_rasterizer.sType = c.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    shadow_rasterizer.lineWidth = 1.0;
    shadow_rasterizer.cullMode = c.VK_CULL_MODE_NONE;
    shadow_rasterizer.frontFace = c.VK_FRONT_FACE_COUNTER_CLOCKWISE;
    // Depth bias values are supplied dynamically (see dynamic states below).
    shadow_rasterizer.depthBiasEnable = c.VK_TRUE;

    var shadow_multisampling = std.mem.zeroes(c.VkPipelineMultisampleStateCreateInfo);
    shadow_multisampling.sType = c.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    shadow_multisampling.rasterizationSamples = c.VK_SAMPLE_COUNT_1_BIT;

    var shadow_depth_stencil = std.mem.zeroes(c.VkPipelineDepthStencilStateCreateInfo);
    shadow_depth_stencil.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    shadow_depth_stencil.depthTestEnable = c.VK_TRUE;
    shadow_depth_stencil.depthWriteEnable = c.VK_TRUE;
    shadow_depth_stencil.depthCompareOp = c.VK_COMPARE_OP_GREATER_OR_EQUAL;

    // Depth-only pass: no color attachments.
    var shadow_color_blend = std.mem.zeroes(c.VkPipelineColorBlendStateCreateInfo);
    shadow_color_blend.sType = c.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    shadow_color_blend.attachmentCount = 0;
    shadow_color_blend.pAttachments = null;

    const shadow_dynamic_states = [_]c.VkDynamicState{ c.VK_DYNAMIC_STATE_VIEWPORT, c.VK_DYNAMIC_STATE_SCISSOR, c.VK_DYNAMIC_STATE_DEPTH_BIAS };
    var shadow_dynamic_state = std.mem.zeroes(c.VkPipelineDynamicStateCreateInfo);
    shadow_dynamic_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    shadow_dynamic_state.dynamicStateCount = shadow_dynamic_states.len;
    shadow_dynamic_state.pDynamicStates = &shadow_dynamic_states;

    var shadow_viewport_state = std.mem.zeroes(c.VkPipelineViewportStateCreateInfo);
    shadow_viewport_state.sType = c.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    shadow_viewport_state.viewportCount = 1;
    shadow_viewport_state.scissorCount = 1;

    var shadow_pipeline_info = std.mem.zeroes(c.VkGraphicsPipelineCreateInfo);
    shadow_pipeline_info.sType = c.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    shadow_pipeline_info.stageCount = shadow_stages.len;
    shadow_pipeline_info.pStages = &shadow_stages[0];
    shadow_pipeline_info.pVertexInputState = &shadow_vertex_input;
    shadow_pipeline_info.pInputAssemblyState = &shadow_input_assembly;
    shadow_pipeline_info.pViewportState = &shadow_viewport_state;
    shadow_pipeline_info.pRasterizationState = &shadow_rasterizer;
    shadow_pipeline_info.pMultisampleState = &shadow_multisampling;
    shadow_pipeline_info.pDepthStencilState = &shadow_depth_stencil;
    shadow_pipeline_info.pColorBlendState = &shadow_color_blend;
    shadow_pipeline_info.pDynamicState = &shadow_dynamic_state;
    shadow_pipeline_info.layout = ctx.pipeline_manager.pipeline_layout;
    shadow_pipeline_info.renderPass = ctx.shadow_system.shadow_render_pass;
    shadow_pipeline_info.subpass = 0;

    var new_pipeline: c.VkPipeline = null;
    try Utils.checkVk(c.vkCreateGraphicsPipelines(vk, null, 1, &shadow_pipeline_info, null, &new_pipeline));

    // Only retire the old pipeline once the replacement exists.
    if (ctx.shadow_system.shadow_pipeline != null) {
        c.vkDestroyPipeline(vk, ctx.shadow_system.shadow_pipeline, null);
    }
    ctx.shadow_system.shadow_pipeline = new_pipeline;
}

/// Creates one single-sampled 2D attachment: image, device-local memory and
/// view are written into the given out-pointers as soon as each is created,
/// so `destroyGPassResources` can clean up after a partial failure (same
/// behavior as the previous inline stanzas this helper replaces).
fn createGAttachment(ctx: anytype, extent: c.VkExtent2D, format: c.VkFormat, usage: c.VkImageUsageFlags, aspect: c.VkImageAspectFlags, image_out: *c.VkImage, memory_out: *c.VkDeviceMemory, view_out: *c.VkImageView) !void {
    const vk = ctx.vulkan_device.vk_device;

    var img_info = std.mem.zeroes(c.VkImageCreateInfo);
    img_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    img_info.imageType = c.VK_IMAGE_TYPE_2D;
    img_info.extent = .{ .width = extent.width, .height = extent.height, .depth = 1 };
    img_info.mipLevels = 1;
    img_info.arrayLayers = 1;
    img_info.format = format;
    img_info.tiling = c.VK_IMAGE_TILING_OPTIMAL;
    img_info.initialLayout = c.VK_IMAGE_LAYOUT_UNDEFINED;
    img_info.usage = usage;
    img_info.samples = c.VK_SAMPLE_COUNT_1_BIT;
    img_info.sharingMode = c.VK_SHARING_MODE_EXCLUSIVE;
    try Utils.checkVk(c.vkCreateImage(vk, &img_info, null, image_out));

    var mem_reqs: c.VkMemoryRequirements = undefined;
    c.vkGetImageMemoryRequirements(vk, image_out.*, &mem_reqs);
    var alloc_info = std.mem.zeroes(c.VkMemoryAllocateInfo);
    alloc_info.sType = c.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = mem_reqs.size;
    alloc_info.memoryTypeIndex = try Utils.findMemoryType(ctx.vulkan_device.physical_device, mem_reqs.memoryTypeBits, c.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    try Utils.checkVk(c.vkAllocateMemory(vk, &alloc_info, null, memory_out));
    try Utils.checkVk(c.vkBindImageMemory(vk, image_out.*, memory_out.*, 0));

    var view_info = std.mem.zeroes(c.VkImageViewCreateInfo);
    view_info.sType = c.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_info.image = image_out.*;
    view_info.viewType = c.VK_IMAGE_VIEW_TYPE_2D;
    view_info.format = format;
    view_info.subresourceRange = .{ .aspectMask = aspect, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 };
    try Utils.checkVk(c.vkCreateImageView(vk, &view_info, null, view_out));
}

/// (Re)creates all G-pass targets — normal, velocity and depth attachments
/// plus the G-pass render pass and framebuffer — then transitions the new
/// images to SHADER_READ_ONLY so the first frame can sample them.
pub fn createGPassResources(ctx: anytype) !void {
    lifecycle.destroyGPassResources(ctx);
    const normal_format = c.VK_FORMAT_R8G8B8A8_UNORM;
    const velocity_format = c.VK_FORMAT_R16G16_SFLOAT;

    try ctx.render_pass_manager.createGPassRenderPass(ctx.vulkan_device.vk_device);

    const vk = ctx.vulkan_device.vk_device;
    const extent = ctx.swapchain.getExtent();

    const color_usage = c.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT;
    try createGAttachment(ctx, extent, normal_format, color_usage, c.VK_IMAGE_ASPECT_COLOR_BIT, &ctx.gpass.g_normal_image, &ctx.gpass.g_normal_memory, &ctx.gpass.g_normal_view);
    try createGAttachment(ctx, extent, velocity_format, color_usage, c.VK_IMAGE_ASPECT_COLOR_BIT, &ctx.velocity.velocity_image, &ctx.velocity.velocity_memory, &ctx.velocity.velocity_view);
    try createGAttachment(ctx, extent, DEPTH_FORMAT, c.VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | c.VK_IMAGE_USAGE_SAMPLED_BIT, c.VK_IMAGE_ASPECT_DEPTH_BIT, &ctx.gpass.g_depth_image, &ctx.gpass.g_depth_memory, &ctx.gpass.g_depth_view);

    try ctx.render_pass_manager.createGPassFramebuffer(vk, extent, ctx.gpass.g_normal_view, ctx.velocity.velocity_view, ctx.gpass.g_depth_view);

    const g_images = [_]c.VkImage{ ctx.gpass.g_normal_image, ctx.velocity.velocity_image };
    try lifecycle.transitionImagesToShaderRead(ctx, &g_images, false);
    const d_images = [_]c.VkImage{ctx.gpass.g_depth_image};
    try lifecycle.transitionImagesToShaderRead(ctx, &d_images, true);

    ctx.gpass.g_pass_extent = extent;
    std.log.debug("G-Pass resources created ({}x{}) with velocity buffer", .{ extent.width, extent.height });
}

pub fn createSSAOResources(ctx: anytype) !void {
    const extent = ctx.swapchain.getExtent();
    try ctx.ssao_system.init(
        &ctx.vulkan_device,
        ctx.allocator,
        ctx.descriptors.descriptor_pool,
        ctx.frames.command_pool,
        extent.width,
        extent.height,
        ctx.gpass.g_normal_view,
        ctx.gpass.g_depth_view,
    );

    ctx.draw.bound_ssao_handle = try ctx.resources.registerNativeTexture(
        ctx.ssao_system.blur_image,
        ctx.ssao_system.blur_view,
        ctx.ssao_system.sampler,
        extent.width,
        extent.height,
        .red,
    );

    for (0..MAX_FRAMES_IN_FLIGHT) |i| {
        var main_ssao_info = c.VkDescriptorImageInfo{
            .sampler = ctx.ssao_system.sampler,
            .imageView = ctx.ssao_system.blur_view,
            .imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        };
        var main_ssao_write = std.mem.zeroes(c.VkWriteDescriptorSet);
        main_ssao_write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        main_ssao_write.dstSet = ctx.descriptors.descriptor_sets[i];
        main_ssao_write.dstBinding = bindings.SSAO_TEXTURE;
        main_ssao_write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        main_ssao_write.descriptorCount = 1;
        main_ssao_write.pImageInfo = &main_ssao_info;
c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &main_ssao_write, 0, null); + + main_ssao_write.dstSet = ctx.descriptors.lod_descriptor_sets[i]; + c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &main_ssao_write, 0, null); + } + + const ssao_images = [_]c.VkImage{ ctx.ssao_system.image, ctx.ssao_system.blur_image }; + try lifecycle.transitionImagesToShaderRead(ctx, &ssao_images, false); +} + +pub fn createPostProcessResources(ctx: anytype) !void { + const vk = ctx.vulkan_device.vk_device; + + try ctx.render_pass_manager.createPostProcessRenderPass(vk, ctx.swapchain.getImageFormat()); + + const global_uniform_size: usize = @intCast(ctx.descriptors.global_ubos[0].size); + try ctx.post_process.init( + vk, + ctx.allocator, + ctx.descriptors.descriptor_pool, + ctx.render_pass_manager.post_process_render_pass, + ctx.hdr.hdr_view, + ctx.descriptors.global_ubos, + global_uniform_size, + ); + + // Create neutral (identity) 3D LUT for color grading and bind it + if (ctx.post_process.lut_texture == 0) { + ctx.post_process.lut_texture = try createNeutralLUT3D(ctx); + } + updatePostProcessLUTDescriptor(ctx); + + try ctx.render_pass_manager.createPostProcessFramebuffers(vk, ctx.allocator, ctx.swapchain.getExtent(), ctx.swapchain.getImageViews()); +} + +/// Generate a 32x32x32 identity LUT where each texel maps to itself: color(r,g,b) = (r,g,b). 
+fn createNeutralLUT3D(ctx: anytype) !rhi.TextureHandle { + const LUT_SIZE: u32 = 32; + const total = LUT_SIZE * LUT_SIZE * LUT_SIZE; + var data = try ctx.allocator.alloc(u8, total * 4); + defer ctx.allocator.free(data); + + var i: u32 = 0; + var z: u32 = 0; + while (z < LUT_SIZE) : (z += 1) { + var y: u32 = 0; + while (y < LUT_SIZE) : (y += 1) { + var x: u32 = 0; + while (x < LUT_SIZE) : (x += 1) { + data[i + 0] = @intCast((x * 255 + (LUT_SIZE - 1) / 2) / (LUT_SIZE - 1)); + data[i + 1] = @intCast((y * 255 + (LUT_SIZE - 1) / 2) / (LUT_SIZE - 1)); + data[i + 2] = @intCast((z * 255 + (LUT_SIZE - 1) / 2) / (LUT_SIZE - 1)); + data[i + 3] = 255; + i += 4; + } + } + } + + const handle = try ctx.resources.createTexture3D(LUT_SIZE, LUT_SIZE, LUT_SIZE, .rgba, .{ + .min_filter = .linear, + .mag_filter = .linear, + .wrap_s = .clamp_to_edge, + .wrap_t = .clamp_to_edge, + .generate_mipmaps = false, + .is_render_target = false, + }, data); + return handle; +} + +fn updatePostProcessLUTDescriptor(ctx: anytype) void { + const vk = ctx.vulkan_device.vk_device; + if (ctx.post_process.lut_texture == 0) return; + const tex = ctx.resources.textures.get(ctx.post_process.lut_texture) orelse return; + ctx.post_process.updateLUTDescriptor(vk, tex.view, tex.sampler); +} + +pub fn updatePostProcessDescriptorsWithBloom(ctx: anytype) void { + const vk = ctx.vulkan_device.vk_device; + const bloom_view = if (ctx.bloom.mip_views[0] != null) ctx.bloom.mip_views[0] else return; + const sampler = if (ctx.bloom.sampler != null) ctx.bloom.sampler else ctx.post_process.sampler; + ctx.post_process.updateBloomDescriptors(vk, bloom_view, sampler); +} + +pub fn createMainFramebuffers(ctx: anytype) !void { + if (ctx.render_pass_manager.hdr_render_pass == null) return error.RenderPassNotInitialized; + + try ctx.render_pass_manager.createMainFramebuffer( + ctx.vulkan_device.vk_device, + ctx.swapchain.getExtent(), + ctx.hdr.hdr_view, + if (ctx.options.msaa_samples > 1) ctx.hdr.hdr_msaa_view else null, + 
ctx.swapchain.swapchain.depth_image_view, + ctx.options.msaa_samples, + ); +} + +pub fn destroyMainRenderPassAndPipelines(ctx: anytype) void { + if (ctx.vulkan_device.vk_device == null) return; + _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); + + if (ctx.render_pass_manager.main_framebuffer != null) { + c.vkDestroyFramebuffer(ctx.vulkan_device.vk_device, ctx.render_pass_manager.main_framebuffer, null); + ctx.render_pass_manager.main_framebuffer = null; + } + + if (ctx.pipeline_manager.terrain_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.terrain_pipeline, null); + ctx.pipeline_manager.terrain_pipeline = null; + } + if (ctx.pipeline_manager.wireframe_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.wireframe_pipeline, null); + ctx.pipeline_manager.wireframe_pipeline = null; + } + if (ctx.pipeline_manager.selection_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.selection_pipeline, null); + ctx.pipeline_manager.selection_pipeline = null; + } + if (ctx.pipeline_manager.line_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.line_pipeline, null); + ctx.pipeline_manager.line_pipeline = null; + } + if (ctx.pipeline_manager.sky_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.sky_pipeline, null); + ctx.pipeline_manager.sky_pipeline = null; + } + if (ctx.pipeline_manager.ui_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.ui_pipeline, null); + ctx.pipeline_manager.ui_pipeline = null; + } + if (ctx.pipeline_manager.ui_tex_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.ui_tex_pipeline, null); + ctx.pipeline_manager.ui_tex_pipeline = null; + } + if (comptime build_options.debug_shadows) { + if (ctx.debug_shadow.pipeline) |pipeline| 
c.vkDestroyPipeline(ctx.vulkan_device.vk_device, pipeline, null); + ctx.debug_shadow.pipeline = null; + } + + if (ctx.pipeline_manager.cloud_pipeline != null) { + c.vkDestroyPipeline(ctx.vulkan_device.vk_device, ctx.pipeline_manager.cloud_pipeline, null); + ctx.pipeline_manager.cloud_pipeline = null; + } + if (ctx.render_pass_manager.hdr_render_pass != null) { + c.vkDestroyRenderPass(ctx.vulkan_device.vk_device, ctx.render_pass_manager.hdr_render_pass, null); + ctx.render_pass_manager.hdr_render_pass = null; + } +} diff --git a/src/engine/graphics/vulkan/rhi_shadow_bridge.zig b/src/engine/graphics/vulkan/rhi_shadow_bridge.zig new file mode 100644 index 00000000..451c2dd2 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_shadow_bridge.zig @@ -0,0 +1,45 @@ +const rhi = @import("../rhi.zig"); +const Mat4 = @import("../../math/mat4.zig").Mat4; + +const ShadowUniforms = extern struct { + light_space_matrices: [rhi.SHADOW_CASCADE_COUNT]Mat4, + cascade_splits: [4]f32, + shadow_texel_sizes: [4]f32, + shadow_params: [4]f32, // x = light_size (PCSS), y/z/w reserved +}; + +pub fn beginShadowPassInternal(ctx: anytype, cascade_index: u32, light_space_matrix: Mat4) void { + if (!ctx.frames.frame_in_progress) return; + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + ctx.shadow_system.beginPass(command_buffer, cascade_index, light_space_matrix); +} + +pub fn endShadowPassInternal(ctx: anytype) void { + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + ctx.shadow_system.endPass(command_buffer); +} + +pub fn getShadowMapHandle(ctx: anytype, cascade_index: u32) rhi.TextureHandle { + if (cascade_index >= rhi.SHADOW_CASCADE_COUNT) return 0; + return ctx.shadow_runtime.shadow_map_handles[cascade_index]; +} + +pub fn updateShadowUniforms(ctx: anytype, params: rhi.ShadowParams) !void { + var splits = [_]f32{ 0, 0, 0, 0 }; + var sizes = [_]f32{ 0, 0, 0, 0 }; + @memcpy(splits[0..rhi.SHADOW_CASCADE_COUNT], &params.cascade_splits); + 
@memcpy(sizes[0..rhi.SHADOW_CASCADE_COUNT], &params.shadow_texel_sizes); + + @memcpy(&ctx.shadow_runtime.shadow_texel_sizes, &params.shadow_texel_sizes); + + const shadow_uniforms = ShadowUniforms{ + .light_space_matrices = params.light_space_matrices, + .cascade_splits = splits, + .shadow_texel_sizes = sizes, + .shadow_params = .{ params.light_size, 0.0, 0.0, 0.0 }, + }; + + try ctx.descriptors.updateShadowUniforms(ctx.frames.current_frame, &shadow_uniforms); +} + +pub fn drawDebugShadowMap(_: anytype, _: usize, _: rhi.TextureHandle) void {} diff --git a/src/engine/graphics/vulkan/rhi_state_control.zig b/src/engine/graphics/vulkan/rhi_state_control.zig new file mode 100644 index 00000000..28fc868a --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_state_control.zig @@ -0,0 +1,180 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const frame_orchestration = @import("rhi_frame_orchestration.zig"); + +pub fn waitIdle(ctx: anytype) void { + if (!ctx.frames.dry_run and ctx.vulkan_device.vk_device != null) { + _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); + } +} + +pub fn setTextureUniforms(ctx: anytype, texture_enabled: bool) void { + ctx.options.textures_enabled = texture_enabled; + ctx.draw.descriptors_updated = false; +} + +pub fn setViewport(ctx: anytype, width: u32, height: u32) void { + const fb_w = width; + const fb_h = height; + _ = fb_w; + _ = fb_h; + + var w: c_int = 0; + var h: c_int = 0; + _ = c.SDL_GetWindowSizeInPixels(ctx.window, &w, &h); + + if (!ctx.swapchain.skip_present and (@as(u32, @intCast(w)) != ctx.swapchain.getExtent().width or @as(u32, @intCast(h)) != ctx.swapchain.getExtent().height)) { + ctx.runtime.framebuffer_resized = true; + } + + if (!ctx.frames.frame_in_progress) return; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + var viewport = std.mem.zeroes(c.VkViewport); + viewport.x = 0.0; + viewport.y = 0.0; + viewport.width = @floatFromInt(width); + viewport.height = 
@floatFromInt(height); + viewport.minDepth = 0.0; + viewport.maxDepth = 1.0; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + var scissor = std.mem.zeroes(c.VkRect2D); + scissor.offset = .{ .x = 0, .y = 0 }; + scissor.extent = .{ .width = width, .height = height }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +pub fn getAllocator(ctx: anytype) std.mem.Allocator { + return ctx.allocator; +} + +pub fn getFrameIndex(ctx: anytype) usize { + return @intCast(ctx.frames.current_frame); +} + +pub fn supportsIndirectFirstInstance(ctx: anytype) bool { + return ctx.vulkan_device.draw_indirect_first_instance; +} + +pub fn recover(ctx: anytype) !void { + if (!ctx.runtime.gpu_fault_detected) return; + + if (ctx.vulkan_device.recovery_count >= ctx.vulkan_device.max_recovery_attempts) { + std.log.err("RHI: Max recovery attempts ({d}) exceeded. GPU is unstable.", .{ctx.vulkan_device.max_recovery_attempts}); + return error.GpuLost; + } + + ctx.vulkan_device.recovery_count += 1; + std.log.info("RHI: Attempting GPU recovery (Attempt {d}/{d})...", .{ ctx.vulkan_device.recovery_count, ctx.vulkan_device.max_recovery_attempts }); + + _ = c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device); + + ctx.runtime.gpu_fault_detected = false; + ctx.mutex.lock(); + defer ctx.mutex.unlock(); + frame_orchestration.recreateSwapchainInternal(ctx); + + if (c.vkDeviceWaitIdle(ctx.vulkan_device.vk_device) != c.VK_SUCCESS) { + std.log.err("RHI: Device unresponsive after recovery. Recovery failed.", .{}); + ctx.vulkan_device.recovery_fail_count += 1; + ctx.runtime.gpu_fault_detected = true; + return error.GpuLost; + } + + ctx.vulkan_device.recovery_success_count += 1; + std.log.info("RHI: Recovery step complete. 
If issues persist, please restart.", .{}); +} + +pub fn setWireframe(ctx: anytype, enabled: bool) void { + if (ctx.options.wireframe_enabled != enabled) { + ctx.options.wireframe_enabled = enabled; + ctx.draw.terrain_pipeline_bound = false; + } +} + +pub fn setTexturesEnabled(ctx: anytype, enabled: bool) void { + ctx.options.textures_enabled = enabled; +} + +pub fn setDebugShadowView(ctx: anytype, enabled: bool) void { + ctx.options.debug_shadows_active = enabled; +} + +pub fn setVSync(ctx: anytype, enabled: bool) void { + if (ctx.options.vsync_enabled == enabled) return; + + ctx.options.vsync_enabled = enabled; + + var mode_count: u32 = 0; + _ = c.vkGetPhysicalDeviceSurfacePresentModesKHR(ctx.vulkan_device.physical_device, ctx.vulkan_device.surface, &mode_count, null); + + if (mode_count == 0) return; + + var modes: [8]c.VkPresentModeKHR = undefined; + var actual_count: u32 = @min(mode_count, 8); + _ = c.vkGetPhysicalDeviceSurfacePresentModesKHR(ctx.vulkan_device.physical_device, ctx.vulkan_device.surface, &actual_count, &modes); + + if (enabled) { + ctx.options.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; + } else { + ctx.options.present_mode = c.VK_PRESENT_MODE_FIFO_KHR; + for (modes[0..actual_count]) |mode| { + if (mode == c.VK_PRESENT_MODE_IMMEDIATE_KHR) { + ctx.options.present_mode = c.VK_PRESENT_MODE_IMMEDIATE_KHR; + break; + } else if (mode == c.VK_PRESENT_MODE_MAILBOX_KHR) { + ctx.options.present_mode = c.VK_PRESENT_MODE_MAILBOX_KHR; + } + } + } + + ctx.runtime.framebuffer_resized = true; + + const mode_name: []const u8 = switch (ctx.options.present_mode) { + c.VK_PRESENT_MODE_IMMEDIATE_KHR => "IMMEDIATE (VSync OFF)", + c.VK_PRESENT_MODE_MAILBOX_KHR => "MAILBOX (Triple Buffer)", + c.VK_PRESENT_MODE_FIFO_KHR => "FIFO (VSync ON)", + c.VK_PRESENT_MODE_FIFO_RELAXED_KHR => "FIFO_RELAXED", + else => "UNKNOWN", + }; + std.log.info("Vulkan present mode: {s}", .{mode_name}); +} + +pub fn setAnisotropicFiltering(ctx: anytype, level: u8) void { + if 
(ctx.options.anisotropic_filtering == level) return; + ctx.options.anisotropic_filtering = level; +} + +pub fn setVolumetricDensity(ctx: anytype, density: f32) void { + _ = ctx; + _ = density; +} + +pub fn setMSAA(ctx: anytype, samples: u8) void { + const clamped = @min(samples, ctx.vulkan_device.max_msaa_samples); + if (ctx.options.msaa_samples == clamped) return; + + ctx.options.msaa_samples = clamped; + ctx.swapchain.msaa_samples = clamped; + ctx.runtime.framebuffer_resized = true; + ctx.runtime.pipeline_rebuild_needed = true; + std.log.info("Vulkan MSAA set to {}x (pending swapchain recreation)", .{clamped}); +} + +pub fn getMaxAnisotropy(ctx: anytype) u8 { + return @intFromFloat(@min(ctx.vulkan_device.max_anisotropy, 16.0)); +} + +pub fn getMaxMSAASamples(ctx: anytype) u8 { + return ctx.vulkan_device.max_msaa_samples; +} + +pub fn getFaultCount(ctx: anytype) u32 { + return ctx.vulkan_device.fault_count; +} + +pub fn getValidationErrorCount(ctx: anytype) u32 { + return ctx.vulkan_device.validation_error_count.load(.monotonic); +} diff --git a/src/engine/graphics/vulkan/rhi_timing.zig b/src/engine/graphics/vulkan/rhi_timing.zig new file mode 100644 index 00000000..22d84d19 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_timing.zig @@ -0,0 +1,126 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); + +const GpuPass = enum { + shadow_0, + shadow_1, + shadow_2, + g_pass, + ssao, + lpv_compute, + sky, + opaque_pass, + cloud, + bloom, + fxaa, + post_process, + + pub const COUNT = 12; +}; + +pub const QUERY_COUNT_PER_FRAME = GpuPass.COUNT * 2; + +fn mapPassName(name: []const u8) ?GpuPass { + if (std.mem.eql(u8, name, "ShadowPass0")) return .shadow_0; + if (std.mem.eql(u8, name, "ShadowPass1")) return .shadow_1; + if (std.mem.eql(u8, name, "ShadowPass2")) return .shadow_2; + if (std.mem.eql(u8, name, "GPass")) return .g_pass; + if (std.mem.eql(u8, name, "SSAOPass")) return .ssao; + if (std.mem.eql(u8, name, 
"LPVPass")) return .lpv_compute; + if (std.mem.eql(u8, name, "SkyPass")) return .sky; + if (std.mem.eql(u8, name, "OpaquePass")) return .opaque_pass; + if (std.mem.eql(u8, name, "CloudPass")) return .cloud; + if (std.mem.eql(u8, name, "BloomPass")) return .bloom; + if (std.mem.eql(u8, name, "FXAAPass")) return .fxaa; + if (std.mem.eql(u8, name, "PostProcessPass")) return .post_process; + return null; +} + +pub fn beginPassTiming(ctx: anytype, pass_name: []const u8) void { + if (!ctx.timing.timing_enabled or ctx.timing.query_pool == null) return; + + const pass = mapPassName(pass_name) orelse return; + const cmd = ctx.frames.command_buffers[ctx.frames.current_frame]; + if (cmd == null) return; + + const query_index = @as(u32, @intCast(ctx.frames.current_frame * QUERY_COUNT_PER_FRAME)) + @as(u32, @intFromEnum(pass)) * 2; + c.vkCmdWriteTimestamp(cmd, c.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, ctx.timing.query_pool, query_index); +} + +pub fn endPassTiming(ctx: anytype, pass_name: []const u8) void { + if (!ctx.timing.timing_enabled or ctx.timing.query_pool == null) return; + + const pass = mapPassName(pass_name) orelse return; + const cmd = ctx.frames.command_buffers[ctx.frames.current_frame]; + if (cmd == null) return; + + const query_index = @as(u32, @intCast(ctx.frames.current_frame * QUERY_COUNT_PER_FRAME)) + @as(u32, @intFromEnum(pass)) * 2 + 1; + c.vkCmdWriteTimestamp(cmd, c.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ctx.timing.query_pool, query_index); +} + +pub fn processTimingResults(ctx: anytype) void { + if (!ctx.timing.timing_enabled or ctx.timing.query_pool == null) return; + if (ctx.runtime.frame_index < rhi.MAX_FRAMES_IN_FLIGHT) return; + + const frame = ctx.frames.current_frame; + const offset = frame * QUERY_COUNT_PER_FRAME; + var results: [QUERY_COUNT_PER_FRAME]u64 = .{0} ** QUERY_COUNT_PER_FRAME; + + const res = c.vkGetQueryPoolResults( + ctx.vulkan_device.vk_device, + ctx.timing.query_pool, + @intCast(offset), + QUERY_COUNT_PER_FRAME, + 
@sizeOf(@TypeOf(results)), + &results, + @sizeOf(u64), + c.VK_QUERY_RESULT_64_BIT, + ); + + if (res == c.VK_SUCCESS) { + const period = ctx.vulkan_device.timestamp_period; + + ctx.timing.timing_results.shadow_pass_ms[0] = @as(f32, @floatFromInt(results[1] -% results[0])) * period / 1e6; + ctx.timing.timing_results.shadow_pass_ms[1] = @as(f32, @floatFromInt(results[3] -% results[2])) * period / 1e6; + ctx.timing.timing_results.shadow_pass_ms[2] = @as(f32, @floatFromInt(results[5] -% results[4])) * period / 1e6; + ctx.timing.timing_results.g_pass_ms = @as(f32, @floatFromInt(results[7] -% results[6])) * period / 1e6; + ctx.timing.timing_results.ssao_pass_ms = @as(f32, @floatFromInt(results[9] -% results[8])) * period / 1e6; + ctx.timing.timing_results.lpv_pass_ms = @as(f32, @floatFromInt(results[11] -% results[10])) * period / 1e6; + ctx.timing.timing_results.sky_pass_ms = @as(f32, @floatFromInt(results[13] -% results[12])) * period / 1e6; + ctx.timing.timing_results.opaque_pass_ms = @as(f32, @floatFromInt(results[15] -% results[14])) * period / 1e6; + ctx.timing.timing_results.cloud_pass_ms = @as(f32, @floatFromInt(results[17] -% results[16])) * period / 1e6; + ctx.timing.timing_results.bloom_pass_ms = @as(f32, @floatFromInt(results[19] -% results[18])) * period / 1e6; + ctx.timing.timing_results.fxaa_pass_ms = @as(f32, @floatFromInt(results[21] -% results[20])) * period / 1e6; + ctx.timing.timing_results.post_process_pass_ms = @as(f32, @floatFromInt(results[23] -% results[22])) * period / 1e6; + + ctx.timing.timing_results.main_pass_ms = ctx.timing.timing_results.sky_pass_ms + ctx.timing.timing_results.opaque_pass_ms + ctx.timing.timing_results.cloud_pass_ms; + ctx.timing.timing_results.validate(); + + ctx.timing.timing_results.total_gpu_ms = 0; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.shadow_pass_ms[0]; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.shadow_pass_ms[1]; + ctx.timing.timing_results.total_gpu_ms += 
ctx.timing.timing_results.shadow_pass_ms[2]; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.g_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.ssao_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.lpv_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.main_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.bloom_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.fxaa_pass_ms; + ctx.timing.timing_results.total_gpu_ms += ctx.timing.timing_results.post_process_pass_ms; + + if (ctx.timing.timing_enabled) { + std.debug.print("GPU Frame Time: {d:.2}ms (Shadow: {d:.2}, G-Pass: {d:.2}, SSAO: {d:.2}, LPV: {d:.2}, Main: {d:.2}, Bloom: {d:.2}, FXAA: {d:.2}, Post: {d:.2})\n", .{ + ctx.timing.timing_results.total_gpu_ms, + ctx.timing.timing_results.shadow_pass_ms[0] + ctx.timing.timing_results.shadow_pass_ms[1] + ctx.timing.timing_results.shadow_pass_ms[2], + ctx.timing.timing_results.g_pass_ms, + ctx.timing.timing_results.ssao_pass_ms, + ctx.timing.timing_results.lpv_pass_ms, + ctx.timing.timing_results.main_pass_ms, + ctx.timing.timing_results.bloom_pass_ms, + ctx.timing.timing_results.fxaa_pass_ms, + ctx.timing.timing_results.post_process_pass_ms, + }); + } + } +} diff --git a/src/engine/graphics/vulkan/rhi_ui_submission.zig b/src/engine/graphics/vulkan/rhi_ui_submission.zig new file mode 100644 index 00000000..b04baf44 --- /dev/null +++ b/src/engine/graphics/vulkan/rhi_ui_submission.zig @@ -0,0 +1,286 @@ +const std = @import("std"); +const c = @import("../../../c.zig").c; +const rhi = @import("../rhi.zig"); +const Mat4 = @import("../../math/mat4.zig").Mat4; +const build_options = @import("build_options"); +const pass_orchestration = @import("rhi_pass_orchestration.zig"); + +fn getUIPipeline(ctx: anytype, textured: bool) c.VkPipeline { + if (ctx.ui.ui_using_swapchain) { + return if (textured) 
ctx.pipeline_manager.ui_swapchain_tex_pipeline else ctx.pipeline_manager.ui_swapchain_pipeline; + } + return if (textured) ctx.pipeline_manager.ui_tex_pipeline else ctx.pipeline_manager.ui_pipeline; +} + +pub fn flushUI(ctx: anytype) void { + if (!ctx.runtime.main_pass_active and !ctx.fxaa.pass_active) { + return; + } + if (ctx.ui.ui_vertex_offset / (6 * @sizeOf(f32)) > ctx.ui.ui_flushed_vertex_count) { + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + const total_vertices: u32 = @intCast(ctx.ui.ui_vertex_offset / (6 * @sizeOf(f32))); + const count = total_vertices - ctx.ui.ui_flushed_vertex_count; + + c.vkCmdDraw(command_buffer, count, 1, ctx.ui.ui_flushed_vertex_count, 0); + ctx.ui.ui_flushed_vertex_count = total_vertices; + } +} + +pub fn begin2DPass(ctx: anytype, screen_width: f32, screen_height: f32) void { + if (!ctx.frames.frame_in_progress) { + return; + } + + const use_swapchain = ctx.runtime.post_process_ran_this_frame; + const ui_pipeline = if (use_swapchain) ctx.pipeline_manager.ui_swapchain_pipeline else ctx.pipeline_manager.ui_pipeline; + if (ui_pipeline == null) return; + + if (use_swapchain) { + if (!ctx.fxaa.pass_active) { + pass_orchestration.beginFXAAPassForUI(ctx); + } + if (!ctx.fxaa.pass_active) return; + } else { + if (!ctx.runtime.main_pass_active) pass_orchestration.beginMainPassInternal(ctx); + if (!ctx.runtime.main_pass_active) return; + } + + ctx.ui.ui_using_swapchain = use_swapchain; + + ctx.ui.ui_screen_width = screen_width; + ctx.ui.ui_screen_height = screen_height; + ctx.ui.ui_in_progress = true; + + const ui_vbo = ctx.ui.ui_vbos[ctx.frames.current_frame]; + if (ui_vbo.mapped_ptr) |ptr| { + ctx.ui.ui_mapped_ptr = ptr; + } else { + std.log.err("UI VBO memory not mapped!", .{}); + } + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ui_pipeline); + ctx.draw.terrain_pipeline_bound = false; + + const 
offset_val: c.VkDeviceSize = 0; + c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &ui_vbo.buffer, &offset_val); + + const proj = Mat4.orthographic(0, ctx.ui.ui_screen_width, ctx.ui.ui_screen_height, 0, -1, 1); + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); + + const viewport = c.VkViewport{ .x = 0, .y = 0, .width = ctx.ui.ui_screen_width, .height = ctx.ui.ui_screen_height, .minDepth = 0, .maxDepth = 1 }; + c.vkCmdSetViewport(command_buffer, 0, 1, &viewport); + const scissor = c.VkRect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = @intFromFloat(ctx.ui.ui_screen_width), .height = @intFromFloat(ctx.ui.ui_screen_height) } }; + c.vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +pub fn end2DPass(ctx: anytype) void { + if (!ctx.ui.ui_in_progress) return; + + ctx.ui.ui_mapped_ptr = null; + + flushUI(ctx); + if (ctx.ui.ui_using_swapchain) { + pass_orchestration.endFXAAPassInternal(ctx); + ctx.ui.ui_using_swapchain = false; + } + ctx.ui.ui_in_progress = false; +} + +pub fn drawRect2D(ctx: anytype, rect: rhi.Rect, color: rhi.Color) void { + const x = rect.x; + const y = rect.y; + const w = rect.width; + const h = rect.height; + + const vertices = [_]f32{ + x, y, color.r, color.g, color.b, color.a, + x + w, y, color.r, color.g, color.b, color.a, + x + w, y + h, color.r, color.g, color.b, color.a, + x, y, color.r, color.g, color.b, color.a, + x + w, y + h, color.r, color.g, color.b, color.a, + x, y + h, color.r, color.g, color.b, color.a, + }; + + const size = @sizeOf(@TypeOf(vertices)); + + const ui_vbo = ctx.ui.ui_vbos[ctx.frames.current_frame]; + if (ctx.ui.ui_vertex_offset + size > ui_vbo.size) { + return; + } + + if (ctx.ui.ui_mapped_ptr) |ptr| { + const dest = @as([*]u8, @ptrCast(ptr)) + ctx.ui.ui_vertex_offset; + @memcpy(dest[0..size], std.mem.asBytes(&vertices)); + ctx.ui.ui_vertex_offset += size; + } +} + +pub fn bindUIPipeline(ctx: anytype, textured: bool) void 
{ + if (!ctx.frames.frame_in_progress) return; + + ctx.draw.terrain_pipeline_bound = false; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + const pipeline = getUIPipeline(ctx, textured); + if (pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); +} + +pub fn drawTexture2D(ctx: anytype, texture: rhi.TextureHandle, rect: rhi.Rect) void { + if (!ctx.frames.frame_in_progress or !ctx.ui.ui_in_progress) return; + + flushUI(ctx); + + const tex_opt = ctx.resources.textures.get(texture); + if (tex_opt == null) { + std.log.err("drawTexture2D: Texture handle {} not found in textures map!", .{texture}); + return; + } + const tex = tex_opt.?; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + const textured_pipeline = getUIPipeline(ctx, true); + if (textured_pipeline == null) return; + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, textured_pipeline); + ctx.draw.terrain_pipeline_bound = false; + + var image_info = std.mem.zeroes(c.VkDescriptorImageInfo); + image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + image_info.imageView = tex.view; + image_info.sampler = tex.sampler; + + const frame = ctx.frames.current_frame; + const idx = ctx.ui.ui_tex_descriptor_next[frame]; + const pool_len = ctx.ui.ui_tex_descriptor_pool[frame].len; + ctx.ui.ui_tex_descriptor_next[frame] = @intCast((idx + 1) % pool_len); + const ds = ctx.ui.ui_tex_descriptor_pool[frame][idx]; + + var write = std.mem.zeroes(c.VkWriteDescriptorSet); + write.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write.dstSet = ds; + write.dstBinding = 0; + write.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write.descriptorCount = 1; + write.pImageInfo = &image_info; + + c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write, 0, null); + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, 
ctx.pipeline_manager.ui_tex_pipeline_layout, 0, 1, &ds, 0, null); + + const proj = Mat4.orthographic(0, ctx.ui.ui_screen_width, ctx.ui.ui_screen_height, 0, -1, 1); + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.ui_tex_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); + + const x = rect.x; + const y = rect.y; + const w = rect.width; + const h = rect.height; + + const vertices = [_]f32{ + x, y, 0.0, 0.0, 0.0, 0.0, + x + w, y, 1.0, 0.0, 0.0, 0.0, + x + w, y + h, 1.0, 1.0, 0.0, 0.0, + x, y, 0.0, 0.0, 0.0, 0.0, + x + w, y + h, 1.0, 1.0, 0.0, 0.0, + x, y + h, 0.0, 1.0, 0.0, 0.0, + }; + + const size = @sizeOf(@TypeOf(vertices)); + if (ctx.ui.ui_mapped_ptr) |ptr| { + const ui_vbo = ctx.ui.ui_vbos[ctx.frames.current_frame]; + if (ctx.ui.ui_vertex_offset + size <= ui_vbo.size) { + const dest = @as([*]u8, @ptrCast(ptr)) + ctx.ui.ui_vertex_offset; + @memcpy(dest[0..size], std.mem.asBytes(&vertices)); + + const start_vertex = @as(u32, @intCast(ctx.ui.ui_vertex_offset / (6 * @sizeOf(f32)))); + c.vkCmdDraw(command_buffer, 6, 1, start_vertex, 0); + + ctx.ui.ui_vertex_offset += size; + ctx.ui.ui_flushed_vertex_count = @intCast(ctx.ui.ui_vertex_offset / (6 * @sizeOf(f32))); + } + } + + const restore_pipeline = getUIPipeline(ctx, false); + if (restore_pipeline != null) { + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, restore_pipeline); + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); + } +} + +pub fn drawDepthTexture(ctx: anytype, texture: rhi.TextureHandle, rect: rhi.Rect) void { + if (comptime !build_options.debug_shadows) return; + if (!ctx.frames.frame_in_progress or !ctx.ui.ui_in_progress) return; + + if (ctx.debug_shadow.pipeline == null) return; + + flushUI(ctx); + + const tex_opt = ctx.resources.textures.get(texture); + if (tex_opt == null) { + std.log.err("drawDepthTexture: Texture handle {} not found in textures 
map!", .{texture}); + return; + } + const tex = tex_opt.?; + + const command_buffer = ctx.frames.command_buffers[ctx.frames.current_frame]; + + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.debug_shadow.pipeline.?); + ctx.draw.terrain_pipeline_bound = false; + + const width_f32 = ctx.ui.ui_screen_width; + const height_f32 = ctx.ui.ui_screen_height; + const proj = Mat4.orthographic(0, width_f32, height_f32, 0, -1, 1); + c.vkCmdPushConstants(command_buffer, ctx.debug_shadow.pipeline_layout.?, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); + + var image_info = std.mem.zeroes(c.VkDescriptorImageInfo); + image_info.imageLayout = c.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + image_info.imageView = tex.view; + image_info.sampler = tex.sampler; + + const frame = ctx.frames.current_frame; + const idx = ctx.debug_shadow.descriptor_next[frame]; + const pool_len = ctx.debug_shadow.descriptor_pool[frame].len; + ctx.debug_shadow.descriptor_next[frame] = @intCast((idx + 1) % pool_len); + const ds = ctx.debug_shadow.descriptor_pool[frame][idx] orelse return; + + var write_set = std.mem.zeroes(c.VkWriteDescriptorSet); + write_set.sType = c.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write_set.dstSet = ds; + write_set.dstBinding = 0; + write_set.descriptorType = c.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write_set.descriptorCount = 1; + write_set.pImageInfo = &image_info; + + c.vkUpdateDescriptorSets(ctx.vulkan_device.vk_device, 1, &write_set, 0, null); + c.vkCmdBindDescriptorSets(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, ctx.debug_shadow.pipeline_layout.?, 0, 1, &ds, 0, null); + + const debug_x = rect.x; + const debug_y = rect.y; + const debug_w = rect.width; + const debug_h = rect.height; + + const debug_vertices = [_]f32{ + debug_x, debug_y, 0.0, 0.0, + debug_x + debug_w, debug_y, 1.0, 0.0, + debug_x + debug_w, debug_y + debug_h, 1.0, 1.0, + debug_x, debug_y, 0.0, 0.0, + debug_x + debug_w, debug_y + debug_h, 1.0, 1.0, + 
debug_x, debug_y + debug_h, 0.0, 1.0, + }; + + if (ctx.debug_shadow.vbo.mapped_ptr) |ptr| { + @memcpy(@as([*]u8, @ptrCast(ptr))[0..@sizeOf(@TypeOf(debug_vertices))], std.mem.asBytes(&debug_vertices)); + + const offset: c.VkDeviceSize = 0; + c.vkCmdBindVertexBuffers(command_buffer, 0, 1, &ctx.debug_shadow.vbo.buffer, &offset); + c.vkCmdDraw(command_buffer, 6, 1, 0, 0); + } + + const restore_pipeline = getUIPipeline(ctx, false); + if (restore_pipeline != null) { + c.vkCmdBindPipeline(command_buffer, c.VK_PIPELINE_BIND_POINT_GRAPHICS, restore_pipeline); + c.vkCmdPushConstants(command_buffer, ctx.pipeline_manager.ui_pipeline_layout, c.VK_SHADER_STAGE_VERTEX_BIT, 0, @sizeOf(Mat4), &proj.data); + } +} diff --git a/src/engine/graphics/vulkan/shadow_system.zig b/src/engine/graphics/vulkan/shadow_system.zig new file mode 100644 index 00000000..0a58ca6c --- /dev/null +++ b/src/engine/graphics/vulkan/shadow_system.zig @@ -0,0 +1,3 @@ +const ShadowSystemImpl = @import("../shadow_system.zig"); + +pub const ShadowSystem = ShadowSystemImpl.ShadowSystem; diff --git a/src/engine/graphics/vulkan/ssao_system.zig b/src/engine/graphics/vulkan/ssao_system.zig index 13a4500c..6e818c40 100644 --- a/src/engine/graphics/vulkan/ssao_system.zig +++ b/src/engine/graphics/vulkan/ssao_system.zig @@ -71,7 +71,7 @@ pub const SSAOSystem = struct { self.params.bias = DEFAULT_BIAS; try self.initRenderPasses(vk, ao_format); - errdefer self.deinit(vk, allocator); + errdefer self.deinit(vk, allocator, descriptor_pool); try self.initImages(device, width, height, ao_format); try self.initFramebuffers(vk, width, height); @@ -490,12 +490,25 @@ pub const SSAOSystem = struct { } } - pub fn deinit(self: *SSAOSystem, vk: c.VkDevice, allocator: Allocator) void { + pub fn deinit(self: *SSAOSystem, vk: c.VkDevice, allocator: Allocator, descriptor_pool: c.VkDescriptorPool) void { _ = allocator; if (self.pipeline != null) c.vkDestroyPipeline(vk, self.pipeline, null); if (self.blur_pipeline != null) 
c.vkDestroyPipeline(vk, self.blur_pipeline, null); if (self.pipeline_layout != null) c.vkDestroyPipelineLayout(vk, self.pipeline_layout, null); if (self.blur_pipeline_layout != null) c.vkDestroyPipelineLayout(vk, self.blur_pipeline_layout, null); + // Free descriptor sets BEFORE destroying their layouts + if (descriptor_pool != null) { + for (0..rhi.MAX_FRAMES_IN_FLIGHT) |i| { + if (self.descriptor_sets[i] != null) { + _ = c.vkFreeDescriptorSets(vk, descriptor_pool, 1, &self.descriptor_sets[i]); + self.descriptor_sets[i] = null; + } + if (self.blur_descriptor_sets[i] != null) { + _ = c.vkFreeDescriptorSets(vk, descriptor_pool, 1, &self.blur_descriptor_sets[i]); + self.blur_descriptor_sets[i] = null; + } + } + } if (self.descriptor_set_layout != null) c.vkDestroyDescriptorSetLayout(vk, self.descriptor_set_layout, null); if (self.blur_descriptor_set_layout != null) c.vkDestroyDescriptorSetLayout(vk, self.blur_descriptor_set_layout, null); if (self.framebuffer != null) c.vkDestroyFramebuffer(vk, self.framebuffer, null); @@ -570,41 +583,3 @@ pub const SSAOSystem = struct { } } }; - -test "SSAOSystem noise generation" { - var rng = std.Random.DefaultPrng.init(12345); - const data1 = SSAOSystem.generateNoiseData(&rng); - rng = std.Random.DefaultPrng.init(12345); - const data2 = SSAOSystem.generateNoiseData(&rng); - - try std.testing.expectEqual(data1, data2); - - // Verify some properties - for (0..NOISE_SIZE * NOISE_SIZE) |i| { - // Red and Green should be random but in 0-255 range (always true for u8) - // Blue should be 0 - try std.testing.expectEqual(@as(u8, 0), data1[i * 4 + 2]); - // Alpha should be 255 - try std.testing.expectEqual(@as(u8, 255), data1[i * 4 + 3]); - } -} - -test "SSAOSystem kernel generation" { - var rng = std.Random.DefaultPrng.init(67890); - const samples1 = SSAOSystem.generateKernelSamples(&rng); - rng = std.Random.DefaultPrng.init(67890); - const samples2 = SSAOSystem.generateKernelSamples(&rng); - - for (0..KERNEL_SIZE) |i| { - try 
std.testing.expectEqual(samples1[i][0], samples2[i][0]); - try std.testing.expectEqual(samples1[i][1], samples2[i][1]); - try std.testing.expectEqual(samples1[i][2], samples2[i][2]); - try std.testing.expectEqual(samples1[i][3], samples2[i][3]); - - // Hemisphere check: z must be >= 0 - try std.testing.expect(samples1[i][2] >= 0.0); - // Length check: should be <= 1.0 (scaled by falloff) - const len = @sqrt(samples1[i][0] * samples1[i][0] + samples1[i][1] * samples1[i][1] + samples1[i][2] * samples1[i][2]); - try std.testing.expect(len <= 1.0); - } -} diff --git a/src/engine/graphics/vulkan/ssao_system_tests.zig b/src/engine/graphics/vulkan/ssao_system_tests.zig new file mode 100644 index 00000000..8a766cf5 --- /dev/null +++ b/src/engine/graphics/vulkan/ssao_system_tests.zig @@ -0,0 +1,34 @@ +const std = @import("std"); +const ssao = @import("ssao_system.zig"); + +test "SSAOSystem noise generation" { + var rng = std.Random.DefaultPrng.init(12345); + const data1 = ssao.SSAOSystem.generateNoiseData(&rng); + rng = std.Random.DefaultPrng.init(12345); + const data2 = ssao.SSAOSystem.generateNoiseData(&rng); + + try std.testing.expectEqual(data1, data2); + + for (0..ssao.NOISE_SIZE * ssao.NOISE_SIZE) |i| { + try std.testing.expectEqual(@as(u8, 0), data1[i * 4 + 2]); + try std.testing.expectEqual(@as(u8, 255), data1[i * 4 + 3]); + } +} + +test "SSAOSystem kernel generation" { + var rng = std.Random.DefaultPrng.init(67890); + const samples1 = ssao.SSAOSystem.generateKernelSamples(&rng); + rng = std.Random.DefaultPrng.init(67890); + const samples2 = ssao.SSAOSystem.generateKernelSamples(&rng); + + for (0..ssao.KERNEL_SIZE) |i| { + try std.testing.expectEqual(samples1[i][0], samples2[i][0]); + try std.testing.expectEqual(samples1[i][1], samples2[i][1]); + try std.testing.expectEqual(samples1[i][2], samples2[i][2]); + try std.testing.expectEqual(samples1[i][3], samples2[i][3]); + + try std.testing.expect(samples1[i][2] >= 0.0); + const len = @sqrt(samples1[i][0] * 
samples1[i][0] + samples1[i][1] * samples1[i][1] + samples1[i][2] * samples1[i][2]); + try std.testing.expect(len <= 1.0); + } +} diff --git a/src/engine/graphics/vulkan/swapchain.zig b/src/engine/graphics/vulkan/swapchain.zig new file mode 100644 index 00000000..b1dc235a --- /dev/null +++ b/src/engine/graphics/vulkan/swapchain.zig @@ -0,0 +1,3 @@ +const VulkanSwapchainImpl = @import("../vulkan_swapchain.zig"); + +pub const VulkanSwapchain = VulkanSwapchainImpl.VulkanSwapchain; diff --git a/src/engine/ui/debug_lpv_overlay.zig b/src/engine/ui/debug_lpv_overlay.zig new file mode 100644 index 00000000..06bb801e --- /dev/null +++ b/src/engine/ui/debug_lpv_overlay.zig @@ -0,0 +1,35 @@ +const std = @import("std"); +const rhi = @import("../graphics/rhi.zig"); +const IUIContext = rhi.IUIContext; + +pub const DebugLPVOverlay = struct { + pub const Config = struct { + // Optional explicit size; <= 0 uses screen-relative fallback. + width: f32 = 0.0, + height: f32 = 0.0, + spacing: f32 = 10.0, + }; + + pub fn rect(screen_height: f32, config: Config) rhi.Rect { + const fallback_size = std.math.clamp(screen_height * 0.28, 160.0, 280.0); + const width = if (config.width > 0.0) config.width else fallback_size; + const height = if (config.height > 0.0) config.height else fallback_size; + return .{ + .x = config.spacing, + .y = screen_height - height - config.spacing, + .width = width, + .height = height, + }; + } + + pub fn draw(ui: IUIContext, lpv_texture: rhi.TextureHandle, screen_width: f32, screen_height: f32, config: Config) void { + if (lpv_texture == 0) return; + + const r = rect(screen_height, config); + + ui.beginPass(screen_width, screen_height); + defer ui.endPass(); + + ui.drawTexture(lpv_texture, r); + } +}; diff --git a/src/engine/ui/timing_overlay.zig b/src/engine/ui/timing_overlay.zig index 140c26fc..421d9d5e 100644 --- a/src/engine/ui/timing_overlay.zig +++ b/src/engine/ui/timing_overlay.zig @@ -16,7 +16,7 @@ pub const TimingOverlay = struct { const width: f32 = 
280; const line_height: f32 = 15; const scale: f32 = 1.0; - const num_lines = 13; // Title + 11 passes + Total + const num_lines = 14; // Title + 12 passes + Total const padding = 20; // Spacers and margins // Background @@ -31,6 +31,7 @@ pub const TimingOverlay = struct { drawTimingLine(ui, "SHADOW 2:", results.shadow_pass_ms[2], x + 10, &y, scale, Color.gray); drawTimingLine(ui, "G-PASS:", results.g_pass_ms, x + 10, &y, scale, Color.gray); drawTimingLine(ui, "SSAO:", results.ssao_pass_ms, x + 10, &y, scale, Color.gray); + drawTimingLine(ui, "LPV:", results.lpv_pass_ms, x + 10, &y, scale, Color.gray); drawTimingLine(ui, "SKY:", results.sky_pass_ms, x + 10, &y, scale, Color.gray); drawTimingLine(ui, "OPAQUE:", results.opaque_pass_ms, x + 10, &y, scale, Color.gray); drawTimingLine(ui, "CLOUDS:", results.cloud_pass_ms, x + 10, &y, scale, Color.gray); diff --git a/src/game/app.zig b/src/game/app.zig index dd9d3b02..0bcf8573 100644 --- a/src/game/app.zig +++ b/src/game/app.zig @@ -20,6 +20,7 @@ const render_graph_pkg = @import("../engine/graphics/render_graph.zig"); const RenderGraph = render_graph_pkg.RenderGraph; const AtmosphereSystem = @import("../engine/graphics/atmosphere_system.zig").AtmosphereSystem; const MaterialSystem = @import("../engine/graphics/material_system.zig").MaterialSystem; +const LPVSystem = @import("../engine/graphics/lpv_system.zig").LPVSystem; const ResourcePackManager = @import("../engine/graphics/resource_pack.zig").ResourcePackManager; const AudioSystem = @import("../engine/audio/system.zig").AudioSystem; const TimingOverlay = @import("../engine/ui/timing_overlay.zig").TimingOverlay; @@ -46,8 +47,9 @@ pub const App = struct { render_graph: RenderGraph, atmosphere_system: *AtmosphereSystem, material_system: *MaterialSystem, + lpv_system: *LPVSystem, audio_system: *AudioSystem, - shadow_passes: [3]render_graph_pkg.ShadowPass, + shadow_passes: [4]render_graph_pkg.ShadowPass, g_pass: render_graph_pkg.GPass, ssao_pass: render_graph_pkg.SSAOPass, 
sky_pass: render_graph_pkg.SkyPass, @@ -233,11 +235,13 @@ pub const App = struct { .render_graph = RenderGraph.init(allocator), .atmosphere_system = atmosphere_system, .material_system = undefined, + .lpv_system = undefined, .audio_system = audio_system, .shadow_passes = .{ render_graph_pkg.ShadowPass.init(0), render_graph_pkg.ShadowPass.init(1), render_graph_pkg.ShadowPass.init(2), + render_graph_pkg.ShadowPass.init(3), }, .g_pass = .{}, .ssao_pass = .{}, @@ -271,6 +275,16 @@ pub const App = struct { app.material_system = try MaterialSystem.init(allocator, rhi, &app.atlas); errdefer app.material_system.deinit(); + app.lpv_system = try LPVSystem.init( + allocator, + rhi, + settings.lpv_grid_size, + settings.lpv_cell_size, + settings.lpv_intensity, + settings.lpv_propagation_iterations, + settings.lpv_enabled, + ); + errdefer app.lpv_system.deinit(); // Sync FXAA and Bloom settings to RHI after initialization app.rhi.setFXAA(settings.fxaa_enabled); @@ -289,6 +303,7 @@ pub const App = struct { try app.render_graph.addPass(app.shadow_passes[0].pass()); try app.render_graph.addPass(app.shadow_passes[1].pass()); try app.render_graph.addPass(app.shadow_passes[2].pass()); + try app.render_graph.addPass(app.shadow_passes[3].pass()); try app.render_graph.addPass(app.g_pass.pass()); try app.render_graph.addPass(app.ssao_pass.pass()); try app.render_graph.addPass(app.sky_pass.pass()); @@ -325,6 +340,7 @@ pub const App = struct { self.render_graph.deinit(); self.atmosphere_system.deinit(); self.material_system.deinit(); + self.lpv_system.deinit(); self.audio_system.deinit(); self.atlas.deinit(); if (self.env_map) |*t| t.deinit(); @@ -350,6 +366,7 @@ pub const App = struct { .render_graph = &self.render_graph, .atmosphere_system = self.atmosphere_system, .material_system = self.material_system, + .lpv_system = self.lpv_system, .audio_system = self.audio_system, .env_map_ptr = &self.env_map, .shader = self.shader, @@ -425,6 +442,11 @@ pub const App = struct { .volumetric_steps = 
0, .volumetric_scattering = 0, .ssao_enabled = false, + .lpv_enabled = false, + .lpv_intensity = 0, + .lpv_cell_size = 2.0, + .lpv_grid_size = 32, + .lpv_origin = Vec3.zero, }); // Update current screen. Transitions happen here. diff --git a/src/game/input_mapper.zig b/src/game/input_mapper.zig index b3be8edb..49a77823 100644 --- a/src/game/input_mapper.zig +++ b/src/game/input_mapper.zig @@ -155,6 +155,8 @@ pub const GameAction = enum(u8) { toggle_clouds, /// Toggle fog toggle_fog, + /// Toggle LPV debug overlay + toggle_lpv_overlay, pub const count = @typeInfo(GameAction).@"enum".fields.len; }; @@ -347,6 +349,7 @@ pub const DEFAULT_BINDINGS = blk: { bindings[@intFromEnum(GameAction.toggle_ssao)] = ActionBinding.init(.{ .key = .f8 }); bindings[@intFromEnum(GameAction.toggle_clouds)] = ActionBinding.init(.{ .key = .f9 }); bindings[@intFromEnum(GameAction.toggle_fog)] = ActionBinding.init(.{ .key = .f10 }); + bindings[@intFromEnum(GameAction.toggle_lpv_overlay)] = ActionBinding.init(.{ .key = .f11 }); // Map controls bindings[@intFromEnum(GameAction.toggle_map)] = ActionBinding.init(.{ .key = .m }); diff --git a/src/game/screen.zig b/src/game/screen.zig index de179f4f..a7c80fe5 100644 --- a/src/game/screen.zig +++ b/src/game/screen.zig @@ -15,6 +15,7 @@ const TextureAtlas = @import("../engine/graphics/texture_atlas.zig").TextureAtla const RenderGraph = @import("../engine/graphics/render_graph.zig").RenderGraph; const AtmosphereSystem = @import("../engine/graphics/atmosphere_system.zig").AtmosphereSystem; const MaterialSystem = @import("../engine/graphics/material_system.zig").MaterialSystem; +const LPVSystem = @import("../engine/graphics/lpv_system.zig").LPVSystem; const Texture = @import("../engine/graphics/texture.zig").Texture; const AudioSystem = @import("../engine/audio/system.zig").AudioSystem; const rhi_pkg = @import("../engine/graphics/rhi.zig"); @@ -28,6 +29,7 @@ pub const EngineContext = struct { render_graph: *RenderGraph, atmosphere_system: 
*AtmosphereSystem, material_system: *MaterialSystem, + lpv_system: *LPVSystem, audio_system: *AudioSystem, env_map_ptr: ?*?Texture, shader: rhi_pkg.ShaderHandle, diff --git a/src/game/screens/graphics.zig b/src/game/screens/graphics.zig index 8909ac70..e877e9e4 100644 --- a/src/game/screens/graphics.zig +++ b/src/game/screens/graphics.zig @@ -220,9 +220,22 @@ pub const GraphicsScreen = struct { ctx.rhi.*.setBloom(settings.bloom_enabled); } else if (std.mem.eql(u8, decl.name, "bloom_intensity")) { ctx.rhi.*.setBloomIntensity(settings.bloom_intensity); + } else if (std.mem.eql(u8, decl.name, "vignette_enabled")) { + ctx.rhi.*.setVignetteEnabled(settings.vignette_enabled); + } else if (std.mem.eql(u8, decl.name, "vignette_intensity")) { + ctx.rhi.*.setVignetteIntensity(settings.vignette_intensity); + } else if (std.mem.eql(u8, decl.name, "film_grain_enabled")) { + ctx.rhi.*.setFilmGrainEnabled(settings.film_grain_enabled); + } else if (std.mem.eql(u8, decl.name, "film_grain_intensity")) { + ctx.rhi.*.setFilmGrainIntensity(settings.film_grain_intensity); } } + if (std.mem.eql(u8, decl.name, "lpv_quality_preset")) { + const legend = getLPVQualityLegend(settings.lpv_quality_preset); + Font.drawText(ui, legend, vx - 90.0 * ui_scale, sy + row_height - 10.0 * ui_scale, 1.2 * ui_scale, Color.rgba(0.72, 0.86, 0.98, 1.0)); + } + sy += row_height; } @@ -247,3 +260,11 @@ fn getPresetLabel(idx: usize) []const u8 { if (idx >= settings_pkg.json_presets.graphics_presets.items.len) return "CUSTOM"; return settings_pkg.json_presets.graphics_presets.items[idx].name; } + +fn getLPVQualityLegend(preset: u32) []const u8 { + return switch (preset) { + 0 => "GRID16 ITER2 TICK8", + 2 => "GRID64 ITER5 TICK3", + else => "GRID32 ITER3 TICK6", + }; +} diff --git a/src/game/screens/world.zig b/src/game/screens/world.zig index 861c3f32..c190003b 100644 --- a/src/game/screens/world.zig +++ b/src/game/screens/world.zig @@ -10,6 +10,8 @@ const rhi_pkg = @import("../../engine/graphics/rhi.zig"); const 
render_graph_pkg = @import("../../engine/graphics/render_graph.zig"); const PausedScreen = @import("paused.zig").PausedScreen; const DebugShadowOverlay = @import("../../engine/ui/debug_shadow_overlay.zig").DebugShadowOverlay; +const DebugLPVOverlay = @import("../../engine/ui/debug_lpv_overlay.zig").DebugLPVOverlay; +const Font = @import("../../engine/ui/font.zig"); const log = @import("../../engine/core/log.zig"); pub const WorldScreen = struct { @@ -110,6 +112,11 @@ pub const WorldScreen = struct { log.log.info("Fog {s}", .{if (self.session.atmosphere.fog_enabled) "enabled" else "disabled"}); self.last_debug_toggle_time = now; } + if (can_toggle_debug and ctx.input_mapper.isActionPressed(ctx.input, .toggle_lpv_overlay)) { + ctx.settings.debug_lpv_overlay_active = !ctx.settings.debug_lpv_overlay_active; + log.log.info("LPV overlay {s}", .{if (ctx.settings.debug_lpv_overlay_active) "enabled" else "disabled"}); + self.last_debug_toggle_time = now; + } // Update Audio Listener const cam = &self.session.player.camera; @@ -150,6 +157,21 @@ pub const WorldScreen = struct { const ssao_enabled = ctx.settings.ssao_enabled and !ctx.disable_ssao and !ctx.disable_gpass_draw; const cloud_shadows_enabled = ctx.settings.cloud_shadows_enabled and !ctx.disable_clouds; + + const lpv_quality = resolveLPVQuality(ctx.settings.lpv_quality_preset); + try ctx.lpv_system.setSettings( + ctx.settings.lpv_enabled, + ctx.settings.lpv_intensity, + ctx.settings.lpv_cell_size, + lpv_quality.propagation_iterations, + lpv_quality.grid_size, + lpv_quality.update_interval_frames, + ); + ctx.rhi.timing().beginPassTiming("LPVPass"); + try ctx.lpv_system.update(self.session.world, camera.position, ctx.settings.debug_lpv_overlay_active); + ctx.rhi.timing().endPassTiming("LPVPass"); + + const lpv_origin = ctx.lpv_system.getOrigin(); const cloud_params: rhi_pkg.CloudParams = blk: { const p = self.session.clouds.getShadowParams(); break :blk .{ @@ -181,6 +203,11 @@ pub const WorldScreen = struct { 
.volumetric_steps = ctx.settings.volumetric_steps, .volumetric_scattering = ctx.settings.volumetric_scattering, .ssao_enabled = ssao_enabled, + .lpv_enabled = ctx.settings.lpv_enabled, + .lpv_intensity = ctx.settings.lpv_intensity, + .lpv_cell_size = ctx.lpv_system.getCellSize(), + .lpv_grid_size = ctx.lpv_system.getGridSize(), + .lpv_origin = lpv_origin, }; }; @@ -189,6 +216,10 @@ pub const WorldScreen = struct { const env_map_handle = if (ctx.env_map_ptr) |e_ptr| (if (e_ptr.*) |t| t.handle else 0) else 0; + // Frame-local cascade storage: computed once by the first ShadowPass, + // then reused by subsequent cascade passes for consistency. + var frame_cascades: ?@import("../../engine/graphics/csm.zig").ShadowCascades = null; + const render_ctx = render_graph_pkg.SceneContext{ .rhi = ctx.rhi.*, // SceneContext expects value for now .world = self.session.world, @@ -211,6 +242,10 @@ pub const WorldScreen = struct { .bloom_enabled = ctx.settings.bloom_enabled, .overlay_renderer = renderOverlay, .overlay_ctx = self, + .cached_cascades = &frame_cascades, + .lpv_texture_handle = ctx.lpv_system.getTextureHandle(), + .lpv_texture_handle_g = ctx.lpv_system.getTextureHandleG(), + .lpv_texture_handle_b = ctx.lpv_system.getTextureHandleB(), }; try ctx.render_graph.execute(render_ctx); } @@ -228,6 +263,34 @@ pub const WorldScreen = struct { if (ctx.settings.debug_shadows_active) { DebugShadowOverlay.draw(ctx.rhi.ui(), ctx.rhi.shadow(), screen_w, screen_h, .{}); } + if (ctx.settings.debug_lpv_overlay_active) { + const overlay_size = std.math.clamp(220.0 * ctx.settings.ui_scale, 160.0, screen_h * 0.4); + const cfg = DebugLPVOverlay.Config{ + .width = overlay_size, + .height = overlay_size, + .spacing = 10.0 * ctx.settings.ui_scale, + }; + const r = DebugLPVOverlay.rect(screen_h, cfg); + DebugLPVOverlay.draw(ctx.rhi.ui(), ctx.lpv_system.getDebugOverlayTextureHandle(), screen_w, screen_h, cfg); + + const stats = ctx.lpv_system.getStats(); + const timing_results = 
ctx.rhi.timing().getTimingResults(); + var line0_buf: [64]u8 = undefined; + var line1_buf: [64]u8 = undefined; + var line2_buf: [64]u8 = undefined; + var line3_buf: [64]u8 = undefined; + const line0 = std.fmt.bufPrint(&line0_buf, "LPV GRID:{d} ITER:{d}", .{ stats.grid_size, stats.propagation_iterations }) catch "LPV"; + const line1 = std.fmt.bufPrint(&line1_buf, "LIGHTS:{d} UPDATE:{d:.2}MS", .{ stats.light_count, stats.cpu_update_ms }) catch "LIGHTS"; + const line2 = std.fmt.bufPrint(&line2_buf, "TICK:{d} UPDATED:{d}", .{ stats.update_interval_frames, if (stats.updated_this_frame) @as(u8, 1) else @as(u8, 0) }) catch "TICK"; + const line3 = std.fmt.bufPrint(&line3_buf, "LPV GPU:{d:.2}MS", .{timing_results.lpv_pass_ms}) catch "GPU"; + + const text_x = r.x; + const text_y = r.y - 28.0; + Font.drawText(ui, line0, text_x, text_y, 1.5, .{ .r = 0.95, .g = 0.98, .b = 1.0, .a = 1.0 }); + Font.drawText(ui, line1, text_x, text_y + 10.0, 1.5, .{ .r = 0.95, .g = 0.98, .b = 1.0, .a = 1.0 }); + Font.drawText(ui, line2, text_x, text_y + 20.0, 1.5, .{ .r = 0.95, .g = 0.98, .b = 1.0, .a = 1.0 }); + Font.drawText(ui, line3, text_x, text_y + 30.0, 1.5, .{ .r = 0.95, .g = 0.98, .b = 1.0, .a = 1.0 }); + } } pub fn onEnter(ptr: *anyopaque) void { @@ -251,3 +314,17 @@ pub const WorldScreen = struct { self.session.hand_renderer.draw(scene_ctx.camera.position, scene_ctx.camera.yaw, scene_ctx.camera.pitch); } }; + +const LPVQualityResolved = struct { + grid_size: u32, + propagation_iterations: u32, + update_interval_frames: u32, +}; + +fn resolveLPVQuality(preset: u32) LPVQualityResolved { + return switch (preset) { + 0 => .{ .grid_size = 16, .propagation_iterations = 2, .update_interval_frames = 8 }, + 2 => .{ .grid_size = 64, .propagation_iterations = 5, .update_interval_frames = 3 }, + else => .{ .grid_size = 32, .propagation_iterations = 3, .update_interval_frames = 6 }, + }; +} diff --git a/src/game/settings/data.zig b/src/game/settings/data.zig index 33f74a53..5580de78 100644 --- 
a/src/game/settings/data.zig +++ b/src/game/settings/data.zig @@ -36,6 +36,7 @@ pub const Settings = struct { textures_enabled: bool = true, wireframe_enabled: bool = false, debug_shadows_active: bool = false, // Reverted to false for normal gameplay + debug_lpv_overlay_active: bool = false, shadow_quality: u32 = 2, // 0=Low, 1=Medium, 2=High, 3=Ultra shadow_distance: f32 = 250.0, anisotropic_filtering: u8 = 16, @@ -67,6 +68,15 @@ pub const Settings = struct { volumetric_scattering: f32 = 0.8, // Mie scattering anisotropy (G) ssao_enabled: bool = true, + // LPV Settings (Issue #260) + lpv_enabled: bool = true, + lpv_quality_preset: u32 = 1, // 0=Fast, 1=Balanced, 2=Quality + lpv_intensity: f32 = 0.5, + lpv_cell_size: f32 = 2.0, + lpv_grid_size: u32 = 32, // Derived from lpv_quality_preset at runtime + lpv_propagation_iterations: u32 = 3, // Derived from lpv_quality_preset at runtime + lpv_update_interval_frames: u32 = 6, // Derived from lpv_quality_preset at runtime + // FXAA Settings (Phase 3) fxaa_enabled: bool = true, @@ -74,6 +84,12 @@ pub const Settings = struct { bloom_enabled: bool = true, bloom_intensity: f32 = 0.5, + // Post-Processing Settings (Phase 6) + vignette_enabled: bool = false, + vignette_intensity: f32 = 0.3, + film_grain_enabled: bool = false, + film_grain_intensity: f32 = 0.15, + // Texture Settings max_texture_resolution: u32 = 512, // 16, 32, 64, 128, 256, 512 @@ -127,6 +143,11 @@ pub const Settings = struct { .label = "CASCADE BLENDING", .kind = .toggle, }; + pub const shadow_distance = SettingMetadata{ + .label = "SHADOW DISTANCE", + .description = "Maximum distance for shadow rendering (higher = more shadows but lower performance)", + .kind = .{ .slider = .{ .min = 100.0, .max = 1000.0, .step = 50.0 } }, + }; pub const pbr_enabled = SettingMetadata{ .label = "PBR RENDERING", .kind = .toggle, @@ -186,6 +207,24 @@ pub const Settings = struct { .label = "BLOOM INTENSITY", .kind = .{ .slider = .{ .min = 0.0, .max = 2.0, .step = 0.1 } }, }; + 
pub const vignette_enabled = SettingMetadata{ + .label = "VIGNETTE", + .description = "Darkens screen edges for cinematic effect", + .kind = .toggle, + }; + pub const vignette_intensity = SettingMetadata{ + .label = "VIGNETTE INTENSITY", + .kind = .{ .slider = .{ .min = 0.0, .max = 1.0, .step = 0.05 } }, + }; + pub const film_grain_enabled = SettingMetadata{ + .label = "FILM GRAIN", + .description = "Adds subtle noise for film-like appearance", + .kind = .toggle, + }; + pub const film_grain_intensity = SettingMetadata{ + .label = "GRAIN INTENSITY", + .kind = .{ .slider = .{ .min = 0.0, .max = 1.0, .step = 0.05 } }, + }; pub const volumetric_density = SettingMetadata{ .label = "FOG DENSITY", .kind = .{ .slider = .{ .min = 0.0, .max = 0.5, .step = 0.05 } }, @@ -198,6 +237,25 @@ pub const Settings = struct { .label = "VOLUMETRIC SCATTERING", .kind = .{ .slider = .{ .min = 0.0, .max = 1.0, .step = 0.05 } }, }; + pub const lpv_enabled = SettingMetadata{ + .label = "LPV GI", + .kind = .toggle, + }; + pub const lpv_quality_preset = SettingMetadata{ + .label = "LPV QUALITY", + .kind = .{ .choice = .{ + .labels = &[_][]const u8{ "FAST", "BALANCED", "QUALITY" }, + .values = &[_]u32{ 0, 1, 2 }, + } }, + }; + pub const lpv_intensity = SettingMetadata{ + .label = "LPV INTENSITY", + .kind = .{ .slider = .{ .min = 0.0, .max = 2.0, .step = 0.1 } }, + }; + pub const lpv_cell_size = SettingMetadata{ + .label = "LPV CELL SIZE", + .kind = .{ .slider = .{ .min = 1.0, .max = 4.0, .step = 0.25 } }, + }; }; pub fn getShadowResolution(self: *const Settings) u32 { diff --git a/src/game/settings/json_presets.zig b/src/game/settings/json_presets.zig index 8b439618..e7fa7d27 100644 --- a/src/game/settings/json_presets.zig +++ b/src/game/settings/json_presets.zig @@ -6,6 +6,7 @@ const Settings = data.Settings; pub const PresetConfig = struct { name: []u8, shadow_quality: u32, + shadow_distance: f32, shadow_pcf_samples: u8, shadow_cascade_blend: bool, pbr_enabled: bool, @@ -21,6 +22,12 @@ pub 
const PresetConfig = struct { volumetric_steps: u32, volumetric_scattering: f32, ssao_enabled: bool, + lpv_quality_preset: u32 = 1, + lpv_enabled: bool = true, + lpv_intensity: f32 = 0.5, + lpv_cell_size: f32 = 2.0, + lpv_grid_size: u32 = 32, + lpv_propagation_iterations: u32 = 3, lod_enabled: bool, render_distance: i32, fxaa_enabled: bool, @@ -49,17 +56,46 @@ pub fn initPresets(allocator: std.mem.Allocator) !void { for (parsed.value) |preset| { var p = preset; // Validate preset values against metadata constraints + // Skip invalid presets instead of failing entire load + if (p.shadow_distance < 100.0 or p.shadow_distance > 1000.0) { + std.log.warn("Skipping preset '{s}': invalid shadow_distance {}", .{ p.name, p.shadow_distance }); + continue; + } if (p.volumetric_density < 0.0 or p.volumetric_density > 0.5) { - return error.InvalidVolumetricDensity; + std.log.warn("Skipping preset '{s}': invalid volumetric_density {}", .{ p.name, p.volumetric_density }); + continue; } if (p.volumetric_steps < 4 or p.volumetric_steps > 32) { - return error.InvalidVolumetricSteps; + std.log.warn("Skipping preset '{s}': invalid volumetric_steps {}", .{ p.name, p.volumetric_steps }); + continue; } if (p.volumetric_scattering < 0.0 or p.volumetric_scattering > 1.0) { - return error.InvalidVolumetricScattering; + std.log.warn("Skipping preset '{s}': invalid volumetric_scattering {}", .{ p.name, p.volumetric_scattering }); + continue; } if (p.bloom_intensity < 0.0 or p.bloom_intensity > 2.0) { - return error.InvalidBloomIntensity; + std.log.warn("Skipping preset '{s}': invalid bloom_intensity {}", .{ p.name, p.bloom_intensity }); + continue; + } + if (p.lpv_intensity < 0.0 or p.lpv_intensity > 2.0) { + std.log.warn("Skipping preset '{s}': invalid lpv_intensity {}", .{ p.name, p.lpv_intensity }); + continue; + } + if (p.lpv_quality_preset > 2) { + std.log.warn("Skipping preset '{s}': invalid lpv_quality_preset {}", .{ p.name, p.lpv_quality_preset }); + continue; + } + if 
(p.lpv_cell_size < 1.0 or p.lpv_cell_size > 4.0) { + std.log.warn("Skipping preset '{s}': invalid lpv_cell_size {}", .{ p.name, p.lpv_cell_size }); + continue; + } + if (p.lpv_grid_size != 16 and p.lpv_grid_size != 32 and p.lpv_grid_size != 64) { + std.log.warn("Skipping preset '{s}': invalid lpv_grid_size {}", .{ p.name, p.lpv_grid_size }); + continue; + } + if (p.lpv_propagation_iterations < 1 or p.lpv_propagation_iterations > 8) { + std.log.warn("Skipping preset '{s}': invalid lpv_propagation_iterations {}", .{ p.name, p.lpv_propagation_iterations }); + continue; } // Duplicate name because parsed.deinit() will free strings p.name = try allocator.dupe(u8, preset.name); @@ -80,6 +116,7 @@ pub fn apply(settings: *Settings, preset_idx: usize) void { if (preset_idx >= graphics_presets.items.len) return; const config = graphics_presets.items[preset_idx]; settings.shadow_quality = config.shadow_quality; + settings.shadow_distance = config.shadow_distance; settings.shadow_pcf_samples = config.shadow_pcf_samples; settings.shadow_cascade_blend = config.shadow_cascade_blend; settings.pbr_enabled = config.pbr_enabled; @@ -95,6 +132,12 @@ pub fn apply(settings: *Settings, preset_idx: usize) void { settings.volumetric_steps = config.volumetric_steps; settings.volumetric_scattering = config.volumetric_scattering; settings.ssao_enabled = config.ssao_enabled; + settings.lpv_quality_preset = config.lpv_quality_preset; + settings.lpv_enabled = config.lpv_enabled; + settings.lpv_intensity = config.lpv_intensity; + settings.lpv_cell_size = config.lpv_cell_size; + settings.lpv_grid_size = config.lpv_grid_size; + settings.lpv_propagation_iterations = config.lpv_propagation_iterations; settings.lod_enabled = config.lod_enabled; settings.render_distance = config.render_distance; settings.fxaa_enabled = config.fxaa_enabled; @@ -112,6 +155,7 @@ pub fn getIndex(settings: *const Settings) usize { fn matches(settings: *const Settings, preset: PresetConfig) bool { const epsilon = 0.0001; 
return settings.shadow_quality == preset.shadow_quality and + std.math.approxEqAbs(f32, settings.shadow_distance, preset.shadow_distance, epsilon) and settings.shadow_pcf_samples == preset.shadow_pcf_samples and settings.shadow_cascade_blend == preset.shadow_cascade_blend and settings.pbr_enabled == preset.pbr_enabled and @@ -127,6 +171,12 @@ fn matches(settings: *const Settings, preset: PresetConfig) bool { std.math.approxEqAbs(f32, settings.volumetric_density, preset.volumetric_density, epsilon) and settings.volumetric_steps == preset.volumetric_steps and std.math.approxEqAbs(f32, settings.volumetric_scattering, preset.volumetric_scattering, epsilon) and + settings.lpv_quality_preset == preset.lpv_quality_preset and + settings.lpv_enabled == preset.lpv_enabled and + std.math.approxEqAbs(f32, settings.lpv_intensity, preset.lpv_intensity, epsilon) and + std.math.approxEqAbs(f32, settings.lpv_cell_size, preset.lpv_cell_size, epsilon) and + settings.lpv_grid_size == preset.lpv_grid_size and + settings.lpv_propagation_iterations == preset.lpv_propagation_iterations and settings.lod_enabled == preset.lod_enabled and settings.fxaa_enabled == preset.fxaa_enabled and settings.bloom_enabled == preset.bloom_enabled and diff --git a/src/tests.zig b/src/tests.zig index 8b5ec3d9..625935ae 100644 --- a/src/tests.zig +++ b/src/tests.zig @@ -10,6 +10,10 @@ const std = @import("std"); const testing = std.testing; +pub const std_options: std.Options = .{ + .log_level = .err, +}; + const Vec3 = @import("zig-math").Vec3; const Mat4 = @import("zig-math").Mat4; const AABB = @import("zig-math").AABB; @@ -31,6 +35,12 @@ const block_registry = @import("world/block_registry.zig"); const TextureAtlas = @import("engine/graphics/texture_atlas.zig").TextureAtlas; const BiomeId = @import("world/worldgen/biome.zig").BiomeId; +// Meshing stage modules +const ao_calculator = @import("world/meshing/ao_calculator.zig"); +const lighting_sampler = @import("world/meshing/lighting_sampler.zig"); +const 
biome_color_sampler = @import("world/meshing/biome_color_sampler.zig"); +const boundary = @import("world/meshing/boundary.zig"); + // Worldgen modules const Noise = @import("zig-noise").Noise; @@ -45,6 +55,7 @@ const BiomeSource = @import("world/worldgen/biome.zig").BiomeSource; test { _ = @import("ecs_tests.zig"); _ = @import("engine/graphics/vulkan_device.zig"); + _ = @import("engine/graphics/vulkan/ssao_system_tests.zig"); _ = @import("vulkan_tests.zig"); _ = @import("engine/graphics/rhi_tests.zig"); _ = @import("world/worldgen/schematics.zig"); @@ -919,6 +930,14 @@ const OverworldGenerator = @import("world/worldgen/overworld_generator.zig").Ove const deco_registry = @import("world/worldgen/decoration_registry.zig"); const Generator = @import("world/worldgen/generator_interface.zig").Generator; +fn chunkFingerprint(chunk: *const Chunk) u64 { + var hasher = std.hash.Wyhash.init(0); + hasher.update(std.mem.asBytes(&chunk.blocks)); + hasher.update(std.mem.asBytes(&chunk.biomes)); + hasher.update(std.mem.asBytes(&chunk.heightmap)); + return hasher.final(); +} + test "WorldGen same seed produces identical blocks at origin" { const allocator = testing.allocator; @@ -1097,6 +1116,31 @@ test "WorldGen golden output for known seed at origin" { try testing.expect(block_registry.getBlockDefinition(surface_block).is_solid); } +test "WorldGen stable chunk fingerprints for known seed" { + const allocator = testing.allocator; + const seed: u64 = 424242; + var gen = OverworldGenerator.init(seed, allocator, deco_registry.StandardDecorationProvider.provider()); + + const positions = [_][2]i32{ + .{ 0, 0 }, + .{ 17, -9 }, + .{ -23, 31 }, + }; + + const expected = [_]u64{ + 3930377586382103994, + 9537000129428755126, + 17337144674893402850, + }; + + for (positions, 0..) 
|pos, i| { + var chunk = Chunk.init(pos[0], pos[1]); + gen.generate(&chunk, null); + const fp = chunkFingerprint(&chunk); + try testing.expectEqual(expected[i], fp); + } +} + test "WorldGen populates heightmap and biomes" { const allocator = testing.allocator; var gen = OverworldGenerator.init(42, allocator, deco_registry.StandardDecorationProvider.provider()); @@ -1137,6 +1181,7 @@ test "Decoration placement" { test "OverworldGenerator with mock decoration provider" { const allocator = std.testing.allocator; const DecorationProvider = @import("world/worldgen/decoration_provider.zig").DecorationProvider; + const DecorationContext = @import("world/worldgen/decoration_provider.zig").DecorationProvider.DecorationContext; const MockProvider = struct { called_count: *usize, @@ -1152,29 +1197,8 @@ test "OverworldGenerator with mock decoration provider" { .decorate = decorate, }; - fn decorate( - ptr: ?*anyopaque, - chunk: *Chunk, - local_x: u32, - local_z: u32, - surface_y: i32, - surface_block: BlockType, - biome: BiomeId, - variant: f32, - allow_subbiomes: bool, - veg_mult: f32, - random: std.Random, - ) void { - _ = chunk; - _ = local_x; - _ = local_z; - _ = surface_y; - _ = surface_block; - _ = biome; - _ = variant; - _ = allow_subbiomes; - _ = veg_mult; - _ = random; + fn decorate(ptr: ?*anyopaque, ctx: DecorationContext) void { + _ = ctx; const count: *usize = @ptrCast(@alignCast(ptr.?)); count.* += 1; } @@ -1261,6 +1285,105 @@ test "adjacent transparent blocks share face" { try testing.expect(total_verts < 72); } +// ============================================================================ +// Meshing Stage Module Tests +// ============================================================================ + +test "calculateVertexAO both sides occluded returns 0.4" { + const ao = ao_calculator.calculateVertexAO(1.0, 1.0, 1.0); + try testing.expectApproxEqAbs(@as(f32, 0.4), ao, 0.001); +} + +test "calculateVertexAO no occlusion returns 1.0" { + const ao = 
ao_calculator.calculateVertexAO(0.0, 0.0, 0.0); + try testing.expectApproxEqAbs(@as(f32, 1.0), ao, 0.001); +} + +test "calculateVertexAO single side occlusion" { + const ao = ao_calculator.calculateVertexAO(1.0, 0.0, 0.0); + try testing.expectApproxEqAbs(@as(f32, 0.8), ao, 0.001); +} + +test "calculateVertexAO corner only occlusion" { + const ao = ao_calculator.calculateVertexAO(0.0, 0.0, 1.0); + try testing.expectApproxEqAbs(@as(f32, 0.8), ao, 0.001); +} + +test "normalizeLightValues zero light" { + const light = PackedLight.init(0, 0); + const norm = lighting_sampler.normalizeLightValues(light); + try testing.expectApproxEqAbs(@as(f32, 0.0), norm.skylight, 0.001); + try testing.expectApproxEqAbs(@as(f32, 0.0), norm.blocklight[0], 0.001); + try testing.expectApproxEqAbs(@as(f32, 0.0), norm.blocklight[1], 0.001); + try testing.expectApproxEqAbs(@as(f32, 0.0), norm.blocklight[2], 0.001); +} + +test "normalizeLightValues max light" { + const light = PackedLight.init(15, 15); + const norm = lighting_sampler.normalizeLightValues(light); + try testing.expectApproxEqAbs(@as(f32, 1.0), norm.skylight, 0.001); + try testing.expectApproxEqAbs(@as(f32, 1.0), norm.blocklight[0], 0.001); +} + +test "normalizeLightValues RGB channels" { + const light = PackedLight.initRGB(8, 4, 8, 12); + const norm = lighting_sampler.normalizeLightValues(light); + try testing.expectApproxEqAbs(@as(f32, 8.0 / 15.0), norm.skylight, 0.001); + try testing.expectApproxEqAbs(@as(f32, 4.0 / 15.0), norm.blocklight[0], 0.001); + try testing.expectApproxEqAbs(@as(f32, 8.0 / 15.0), norm.blocklight[1], 0.001); + try testing.expectApproxEqAbs(@as(f32, 12.0 / 15.0), norm.blocklight[2], 0.001); +} + +test "getBlockColor returns no tint for stone" { + var chunk = Chunk.init(0, 0); + const color = biome_color_sampler.getBlockColor(&chunk, .empty, .top, 0, 8, 8, .stone); + try testing.expectApproxEqAbs(@as(f32, 1.0), color[0], 0.001); + try testing.expectApproxEqAbs(@as(f32, 1.0), color[1], 0.001); + try 
testing.expectApproxEqAbs(@as(f32, 1.0), color[2], 0.001); +} + +test "getBlockColor returns no tint for grass side face" { + var chunk = Chunk.init(0, 0); + const color = biome_color_sampler.getBlockColor(&chunk, .empty, .east, 0, 8, 8, .grass); + try testing.expectApproxEqAbs(@as(f32, 1.0), color[0], 0.001); + try testing.expectApproxEqAbs(@as(f32, 1.0), color[1], 0.001); + try testing.expectApproxEqAbs(@as(f32, 1.0), color[2], 0.001); +} + +test "getBlockColor returns biome tint for grass top face" { + var chunk = Chunk.init(0, 0); + const color = biome_color_sampler.getBlockColor(&chunk, .empty, .top, 64, 8, 8, .grass); + // Plains biome grass color should not be {1, 1, 1} (it should be tinted) + try testing.expect(color[0] != 1.0 or color[1] != 1.0 or color[2] != 1.0); +} + +test "boundary getBlockCross returns air for null neighbors" { + var chunk = Chunk.init(0, 0); + // Access x = -1 with no west neighbor + const block = boundary.getBlockCross(&chunk, .empty, -1, 64, 8); + try testing.expectEqual(BlockType.air, block); +} + +test "boundary getBlockCross returns block within chunk bounds" { + var chunk = Chunk.init(0, 0); + chunk.setBlock(8, 64, 8, .stone); + // Access within chunk bounds + const block = boundary.getBlockCross(&chunk, .empty, 8, 64, 8); + try testing.expectEqual(BlockType.stone, block); +} + +test "boundary getBlockCross reads from neighbor chunk" { + var chunk = Chunk.init(0, 0); + var east_chunk = Chunk.init(1, 0); + east_chunk.setBlock(0, 64, 8, .dirt); + const neighbors = NeighborChunks{ + .east = &east_chunk, + }; + // Access x = 16 should read from east neighbor at x=0 + const block = boundary.getBlockCross(&chunk, neighbors, 16, 64, 8); + try testing.expectEqual(BlockType.dirt, block); +} + // ============================================================================ // Texture Atlas Tests // ============================================================================ @@ -1975,6 +2098,177 @@ test "BiomeSource getColor returns valid
packed RGB" { try testing.expect(ocean_color != desert_color); } +// ============================================================================ +// Shadow Cascade Tests (Issue #243) +// ============================================================================ + +const CSM = @import("engine/graphics/csm.zig"); + +test "ShadowCascades initZero produces finite zero state" { + const cascades = CSM.ShadowCascades.initZero(); + // Zero-initialized cascades are NOT valid (splits must be > 0) + try testing.expect(!cascades.isValid()); + + // But all values must be finite (no NaN/Inf from uninitialized memory) + for (0..CSM.CASCADE_COUNT) |i| { + try testing.expect(std.math.isFinite(cascades.cascade_splits[i])); + try testing.expect(std.math.isFinite(cascades.texel_sizes[i])); + for (0..4) |row| { + for (0..4) |col| { + try testing.expect(std.math.isFinite(cascades.light_space_matrices[i].data[row][col])); + } + } + } +} + +test "ShadowCascades isValid rejects NaN splits" { + var cascades = CSM.ShadowCascades.initZero(); + // Set up valid-looking data first + for (0..CSM.CASCADE_COUNT) |i| { + cascades.cascade_splits[i] = @as(f32, @floatFromInt(i + 1)) * 50.0; + cascades.texel_sizes[i] = 0.1 * @as(f32, @floatFromInt(i + 1)); + cascades.light_space_matrices[i] = Mat4.identity; + } + try testing.expect(cascades.isValid()); + + // Inject NaN into one cascade split + cascades.cascade_splits[1] = std.math.nan(f32); + try testing.expect(!cascades.isValid()); +} + +test "ShadowCascades isValid rejects non-monotonic splits" { + var cascades = CSM.ShadowCascades.initZero(); + for (0..CSM.CASCADE_COUNT) |i| { + cascades.cascade_splits[i] = @as(f32, @floatFromInt(i + 1)) * 50.0; + cascades.texel_sizes[i] = 0.1 * @as(f32, @floatFromInt(i + 1)); + cascades.light_space_matrices[i] = Mat4.identity; + } + try testing.expect(cascades.isValid()); + + // Make splits non-monotonic + cascades.cascade_splits[2] = cascades.cascade_splits[1]; // equal, not increasing + try
testing.expect(!cascades.isValid()); +} + +test "computeCascades produces valid output for typical inputs" { + const cascades = CSM.computeCascades( + 2048, + std.math.degreesToRadians(70.0), + 16.0 / 9.0, + 0.1, + 250.0, + Vec3.init(0.3, -1.0, 0.2).normalize(), + Mat4.identity, + true, + ); + + try testing.expect(cascades.isValid()); + + // Splits should be monotonically increasing and bounded by shadow distance + var last_split: f32 = 0.0; + for (0..CSM.CASCADE_COUNT) |i| { + try testing.expect(cascades.cascade_splits[i] > last_split); + try testing.expect(cascades.cascade_splits[i] <= 250.0); + last_split = cascades.cascade_splits[i]; + } + + // Last cascade should reach shadow distance + try testing.expectApproxEqAbs(@as(f32, 250.0), cascades.cascade_splits[CSM.CASCADE_COUNT - 1], 1.0); +} + +test "computeCascades deterministic with same inputs" { + const args = .{ + @as(u32, 2048), + std.math.degreesToRadians(@as(f32, 60.0)), + @as(f32, 16.0 / 9.0), + @as(f32, 0.1), + @as(f32, 200.0), + Vec3.init(0.5, -0.8, 0.3).normalize(), + Mat4.identity, + true, + }; + + const c1 = @call(.auto, CSM.computeCascades, args); + const c2 = @call(.auto, CSM.computeCascades, args); + + for (0..CSM.CASCADE_COUNT) |i| { + try testing.expectEqual(c1.cascade_splits[i], c2.cascade_splits[i]); + try testing.expectEqual(c1.texel_sizes[i], c2.texel_sizes[i]); + for (0..4) |row| { + for (0..4) |col| { + try testing.expectEqual( + c1.light_space_matrices[i].data[row][col], + c2.light_space_matrices[i].data[row][col], + ); + } + } + } +} + +test "computeCascades returns safe defaults for invalid inputs" { + // Zero resolution + const c1 = CSM.computeCascades(0, 1.0, 1.0, 0.1, 200.0, Vec3.init(0, -1, 0), Mat4.identity, true); + try testing.expectEqual(@as(f32, 0.0), c1.cascade_splits[0]); + + // far <= near + const c2 = CSM.computeCascades(1024, 1.0, 1.0, 200.0, 0.1, Vec3.init(0, -1, 0), Mat4.identity, true); + try testing.expectEqual(@as(f32, 0.0), c2.cascade_splits[0]); + + // near <= 0 + 
const c3 = CSM.computeCascades(1024, 1.0, 1.0, 0.0, 200.0, Vec3.init(0, -1, 0), Mat4.identity, true); + try testing.expectEqual(@as(f32, 0.0), c3.cascade_splits[0]); + + // Negative near plane + const c4 = CSM.computeCascades(1024, 1.0, 1.0, -0.1, 200.0, Vec3.init(0, -1, 0), Mat4.identity, true); + try testing.expectEqual(@as(f32, 0.0), c4.cascade_splits[0]); +} + +test "computeCascades stable at large world coordinates" { + // Test that texel snapping precision fix works at large coordinates + // by verifying matrices are finite and valid even with a camera far from origin + const far_view = Mat4.translate(Vec3.init(-50000.0, -100.0, -50000.0)); + const cascades = CSM.computeCascades( + 2048, + std.math.degreesToRadians(60.0), + 16.0 / 9.0, + 0.1, + 250.0, + Vec3.init(0.3, -1.0, 0.2).normalize(), + far_view, + true, + ); + + try testing.expect(cascades.isValid()); + + // All matrix elements must be finite (no precision overflow) + for (0..CSM.CASCADE_COUNT) |i| { + for (0..4) |row| { + for (0..4) |col| { + try testing.expect(std.math.isFinite(cascades.light_space_matrices[i].data[row][col])); + } + } + } +} + +test "computeCascades uses fixed splits for large distances" { + // Shadow distance > 500 triggers fixed split ratios (8%, 25%, 60%, 100%) + const cascades = CSM.computeCascades( + 2048, + std.math.degreesToRadians(60.0), + 16.0 / 9.0, + 0.1, + 1000.0, + Vec3.init(0, -1, 0), + Mat4.identity, + true, + ); + + try testing.expectApproxEqAbs(@as(f32, 80.0), cascades.cascade_splits[0], 0.1); // 8% + try testing.expectApproxEqAbs(@as(f32, 250.0), cascades.cascade_splits[1], 0.1); // 25% + try testing.expectApproxEqAbs(@as(f32, 600.0), cascades.cascade_splits[2], 0.1); // 60% + try testing.expectApproxEqAbs(@as(f32, 1000.0), cascades.cascade_splits[3], 0.1); // 100% +} + test "BiomeSource selectBiomeWithEdge no edge returns primary only" { const biome_mod = @import("world/worldgen/biome.zig"); const source = BiomeSource.init(); diff --git a/src/world/block.zig 
b/src/world/block.zig index 8062e307..26b541f7 100644 --- a/src/world/block.zig +++ b/src/world/block.zig @@ -119,6 +119,8 @@ pub const BlockType = enum(u8) { spruce_log = 42, spruce_leaves = 43, vine = 44, + torch = 45, + lava = 46, _, }; diff --git a/src/world/block_registry.zig b/src/world/block_registry.zig index 316cd4ed..a57980f8 100644 --- a/src/world/block_registry.zig +++ b/src/world/block_registry.zig @@ -207,6 +207,16 @@ pub const BLOCK_REGISTRY = blk: { def.texture_bottom = "spruce_log_top"; def.texture_side = "spruce_log_side"; }, + .torch => { + def.texture_top = "torch"; + def.texture_bottom = "torch"; + def.texture_side = "torch"; + }, + .lava => { + def.texture_top = "lava"; + def.texture_bottom = "lava"; + def.texture_side = "lava"; + }, else => {}, } @@ -257,18 +267,20 @@ pub const BLOCK_REGISTRY = blk: { .spruce_log => .{ 0.35, 0.25, 0.15 }, .spruce_leaves => .{ 0.15, 0.4, 0.15 }, .vine => .{ 0.2, 0.5, 0.1 }, + .torch => .{ 1.0, 0.8, 0.4 }, + .lava => .{ 1.0, 0.4, 0.1 }, else => .{ 1, 0, 1 }, }; // 2. Solid def.is_solid = switch (id) { - .air, .water => false, + .air, .water, .lava, .torch => false, else => true, }; // 3. Transparent def.is_transparent = switch (id) { - .air, .water, .glass, .leaves, .mangrove_leaves, .mangrove_roots, .jungle_leaves, .bamboo, .acacia_leaves, .acacia_sapling, .birch_leaves, .spruce_leaves, .vine, .tall_grass, .flower_red, .flower_yellow, .dead_bush, .cactus, .melon => true, + .air, .water, .lava, .glass, .leaves, .mangrove_leaves, .mangrove_roots, .jungle_leaves, .bamboo, .acacia_leaves, .acacia_sapling, .birch_leaves, .spruce_leaves, .vine, .tall_grass, .flower_red, .flower_yellow, .dead_bush, .cactus, .melon, .torch => true, else => false, }; @@ -280,20 +292,25 @@ pub const BLOCK_REGISTRY = blk: { // 5. Is Fluid def.is_fluid = switch (id) { - .water => true, + .water, .lava => true, else => false, }; // 6. 
Render Pass def.render_pass = switch (id) { - .water => .fluid, + .water, .lava => .fluid, .glass => .translucent, - .leaves, .mangrove_leaves, .jungle_leaves, .acacia_leaves, .birch_leaves, .spruce_leaves, .mangrove_roots, .bamboo, .acacia_sapling, .vine, .tall_grass, .flower_red, .flower_yellow, .dead_bush, .cactus, .melon => .cutout, + .leaves, .mangrove_leaves, .jungle_leaves, .acacia_leaves, .birch_leaves, .spruce_leaves, .mangrove_roots, .bamboo, .acacia_sapling, .vine, .tall_grass, .flower_red, .flower_yellow, .dead_bush, .cactus, .melon, .torch => .cutout, else => .solid, }; - // 7. Light Emission - def.light_emission = if (id == .glowstone) .{ 15, 14, 10 } else .{ 0, 0, 0 }; + // 7. Light Emission (RGB values 0-15) + def.light_emission = switch (id) { + .glowstone => .{ 15, 14, 10 }, // Warm yellow + .torch => .{ 15, 11, 6 }, // Warm orange + .lava => .{ 15, 8, 3 }, // Red-orange + else => .{ 0, 0, 0 }, + }; definitions[int_id] = def; } diff --git a/src/world/chunk_mesh.zig b/src/world/chunk_mesh.zig index a9c68898..d6bebeb0 100644 --- a/src/world/chunk_mesh.zig +++ b/src/world/chunk_mesh.zig @@ -1,51 +1,36 @@ -//! Chunk mesh generation with Greedy Meshing and Subchunks. +//! Chunk mesh orchestrator — coordinates meshing stages and manages GPU lifecycle. //! -//! Vertices are built per-subchunk for greedy meshing efficiency, -//! then merged into single solid/fluid buffers for minimal draw calls. +//! Vertices are built per-subchunk via the greedy mesher, then merged into +//! single solid/fluid buffers for minimal draw calls. Meshing logic is +//! delegated to modules in `meshing/`. 
const std = @import("std"); const Chunk = @import("chunk.zig").Chunk; -const PackedLight = @import("chunk.zig").PackedLight; const CHUNK_SIZE_X = @import("chunk.zig").CHUNK_SIZE_X; -const CHUNK_SIZE_Y = @import("chunk.zig").CHUNK_SIZE_Y; const CHUNK_SIZE_Z = @import("chunk.zig").CHUNK_SIZE_Z; -const BlockType = @import("block.zig").BlockType; -const block_registry = @import("block_registry.zig"); -const Face = @import("block.zig").Face; -const ALL_FACES = @import("block.zig").ALL_FACES; const TextureAtlas = @import("../engine/graphics/texture_atlas.zig").TextureAtlas; -const biome_mod = @import("worldgen/biome.zig"); const rhi_mod = @import("../engine/graphics/rhi.zig"); const RHI = rhi_mod.RHI; const Vertex = rhi_mod.Vertex; -const BufferHandle = rhi_mod.BufferHandle; const chunk_alloc_mod = @import("chunk_allocator.zig"); const GlobalVertexAllocator = chunk_alloc_mod.GlobalVertexAllocator; const VertexAllocation = chunk_alloc_mod.VertexAllocation; -pub const SUBCHUNK_SIZE = 16; -pub const NUM_SUBCHUNKS = 16; +// Meshing stage modules +const greedy_mesher = @import("meshing/greedy_mesher.zig"); +const boundary = @import("meshing/boundary.zig"); + +// Re-export public types for external consumers +pub const NeighborChunks = boundary.NeighborChunks; +pub const SUBCHUNK_SIZE = boundary.SUBCHUNK_SIZE; +pub const NUM_SUBCHUNKS = boundary.NUM_SUBCHUNKS; pub const Pass = enum { solid, fluid, }; -pub const NeighborChunks = struct { - north: ?*const Chunk = null, - south: ?*const Chunk = null, - east: ?*const Chunk = null, - west: ?*const Chunk = null, - - pub const empty = NeighborChunks{ - .north = null, - .south = null, - .east = null, - .west = null, - }; -}; - /// Merged chunk mesh with single solid/fluid buffers for minimal draw calls. /// Subchunk data is only used during mesh building, then merged. pub const ChunkMesh = struct { @@ -105,6 +90,8 @@ pub const ChunkMesh = struct { } } + /// Build the full chunk mesh from chunk data and neighbors. 
+ /// Delegates greedy meshing to the meshing stage modules. pub fn buildWithNeighbors(self: *ChunkMesh, chunk: *const Chunk, neighbors: NeighborChunks, atlas: *const TextureAtlas) !void { // Build each subchunk separately (greedy meshing works per Y slice) for (0..NUM_SUBCHUNKS) |i| { @@ -124,17 +111,20 @@ pub const ChunkMesh = struct { const y0: i32 = @intCast(si * SUBCHUNK_SIZE); const y1: i32 = y0 + SUBCHUNK_SIZE; + // Mesh horizontal slices (top/bottom faces) var sy: i32 = y0; while (sy <= y1) : (sy += 1) { - try self.meshSlice(chunk, neighbors, .top, sy, si, &solid_verts, &fluid_verts, atlas); + try greedy_mesher.meshSlice(self.allocator, chunk, neighbors, .top, sy, si, &solid_verts, &fluid_verts, atlas); } + // Mesh east/west face slices var sx: i32 = 0; while (sx <= CHUNK_SIZE_X) : (sx += 1) { - try self.meshSlice(chunk, neighbors, .east, sx, si, &solid_verts, &fluid_verts, atlas); + try greedy_mesher.meshSlice(self.allocator, chunk, neighbors, .east, sx, si, &solid_verts, &fluid_verts, atlas); } + // Mesh south/north face slices var sz: i32 = 0; while (sz <= CHUNK_SIZE_Z) : (sz += 1) { - try self.meshSlice(chunk, neighbors, .south, sz, si, &solid_verts, &fluid_verts, atlas); + try greedy_mesher.meshSlice(self.allocator, chunk, neighbors, .south, sz, si, &solid_verts, &fluid_verts, atlas); } // Store subchunk data temporarily (will be merged later) @@ -177,10 +167,10 @@ pub const ChunkMesh = struct { var merged = try self.allocator.alloc(Vertex, total_solid); var offset: usize = 0; for (0..NUM_SUBCHUNKS) |i| { - if (self.subchunk_solid[i]) |v| { - @memcpy(merged[offset..][0..v.len], v); - offset += v.len; - self.allocator.free(v); + if (self.subchunk_solid[i]) |v_slice| { + @memcpy(merged[offset..][0..v_slice.len], v_slice); + offset += v_slice.len; + self.allocator.free(v_slice); self.subchunk_solid[i] = null; } } @@ -194,10 +184,10 @@ pub const ChunkMesh = struct { var merged = try self.allocator.alloc(Vertex, total_fluid); var offset: usize = 0; for 
(0..NUM_SUBCHUNKS) |i| { - if (self.subchunk_fluid[i]) |v| { - @memcpy(merged[offset..][0..v.len], v); - offset += v.len; - self.allocator.free(v); + if (self.subchunk_fluid[i]) |v_slice| { + @memcpy(merged[offset..][0..v_slice.len], v_slice); + offset += v_slice.len; + self.allocator.free(v_slice); self.subchunk_fluid[i] = null; } } @@ -207,114 +197,6 @@ pub const ChunkMesh = struct { } } - const FaceKey = struct { - block: BlockType, - side: bool, - light: PackedLight, - color: [3]f32, - }; - - fn meshSlice(self: *ChunkMesh, chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, si: u32, solid_list: *std.ArrayListUnmanaged(Vertex), fluid_list: *std.ArrayListUnmanaged(Vertex), atlas: *const TextureAtlas) !void { - const du: u32 = 16; - const dv: u32 = 16; - var mask = try self.allocator.alloc(?FaceKey, du * dv); - defer self.allocator.free(mask); - @memset(mask, null); - - var v: u32 = 0; - while (v < dv) : (v += 1) { - var u: u32 = 0; - while (u < du) : (u += 1) { - const res = getBlocksAtBoundary(chunk, neighbors, axis, s, u, v, si); - const b1 = res[0]; - const b2 = res[1]; - - const y_min: i32 = @intCast(si * SUBCHUNK_SIZE); - const y_max: i32 = y_min + SUBCHUNK_SIZE; - - const b1_def = block_registry.getBlockDefinition(b1); - const b2_def = block_registry.getBlockDefinition(b2); - - const b1_emits = b1_def.is_solid or (b1_def.is_fluid and !b2_def.is_fluid); - const b2_emits = b2_def.is_solid or (b2_def.is_fluid and !b1_def.is_fluid); - - if (isEmittingSubchunk(axis, s - 1, u, v, y_min, y_max) and b1_emits and !b2_def.occludes(b1_def, axis)) { - const light = getLightAtBoundary(chunk, neighbors, axis, s, u, v, si); - const color = getBlockColor(chunk, neighbors, axis, s - 1, u, v, b1); - mask[u + v * du] = .{ .block = b1, .side = true, .light = light, .color = color }; - } else if (isEmittingSubchunk(axis, s, u, v, y_min, y_max) and b2_emits and !b1_def.occludes(b2_def, axis)) { - const light = getLightAtBoundary(chunk, neighbors, axis, s, u, v, 
si); - const color = getBlockColor(chunk, neighbors, axis, s, u, v, b2); - mask[u + v * du] = .{ .block = b2, .side = false, .light = light, .color = color }; - } - } - } - - var sv: u32 = 0; - while (sv < dv) : (sv += 1) { - var su: u32 = 0; - while (su < du) : (su += 1) { - const k_opt = mask[su + sv * du]; - if (k_opt == null) continue; - const k = k_opt.?; - - var width: u32 = 1; - while (su + width < du) : (width += 1) { - const nxt_opt = mask[su + width + sv * du]; - if (nxt_opt == null) break; - const nxt = nxt_opt.?; - if (nxt.block != k.block or nxt.side != k.side) break; - const sky_diff = @as(i8, @intCast(nxt.light.getSkyLight())) - @as(i8, @intCast(k.light.getSkyLight())); - const r_diff = @as(i8, @intCast(nxt.light.getBlockLightR())) - @as(i8, @intCast(k.light.getBlockLightR())); - const g_diff = @as(i8, @intCast(nxt.light.getBlockLightG())) - @as(i8, @intCast(k.light.getBlockLightG())); - const b_diff = @as(i8, @intCast(nxt.light.getBlockLightB())) - @as(i8, @intCast(k.light.getBlockLightB())); - if (@abs(sky_diff) > 1 or @abs(r_diff) > 1 or @abs(g_diff) > 1 or @abs(b_diff) > 1) break; - - const diff_r = @abs(nxt.color[0] - k.color[0]); - const diff_g = @abs(nxt.color[1] - k.color[1]); - const diff_b = @abs(nxt.color[2] - k.color[2]); - if (diff_r > 0.02 or diff_g > 0.02 or diff_b > 0.02) break; - } - var height: u32 = 1; - var dvh: u32 = 1; - outer: while (sv + dvh < dv) : (dvh += 1) { - var duw: u32 = 0; - while (duw < width) : (duw += 1) { - const nxt_opt = mask[su + duw + (sv + dvh) * du]; - if (nxt_opt == null) break :outer; - const nxt = nxt_opt.?; - if (nxt.block != k.block or nxt.side != k.side) break :outer; - const sky_diff = @as(i8, @intCast(nxt.light.getSkyLight())) - @as(i8, @intCast(k.light.getSkyLight())); - const r_diff = @as(i8, @intCast(nxt.light.getBlockLightR())) - @as(i8, @intCast(k.light.getBlockLightR())); - const g_diff = @as(i8, @intCast(nxt.light.getBlockLightG())) - @as(i8, @intCast(k.light.getBlockLightG())); - const b_diff 
= @as(i8, @intCast(nxt.light.getBlockLightB())) - @as(i8, @intCast(k.light.getBlockLightB())); - if (@abs(sky_diff) > 1 or @abs(r_diff) > 1 or @abs(g_diff) > 1 or @abs(b_diff) > 1) break :outer; - - const diff_r = @abs(nxt.color[0] - k.color[0]); - const diff_g = @abs(nxt.color[1] - k.color[1]); - const diff_b = @abs(nxt.color[2] - k.color[2]); - if (diff_r > 0.02 or diff_g > 0.02 or diff_b > 0.02) break :outer; - } - height += 1; - } - - const k_def = block_registry.getBlockDefinition(k.block); - const target = if (k_def.render_pass == .fluid) fluid_list else solid_list; - try addGreedyFace(self.allocator, target, axis, s, su, sv, width, height, k_def, k.side, si, k.light, k.color, chunk, neighbors, atlas); - - var dy: u32 = 0; - while (dy < height) : (dy += 1) { - var dx: u32 = 0; - while (dx < width) : (dx += 1) { - mask[su + dx + (sv + dy) * du] = null; - } - } - su += width - 1; - } - } - } - - /// Upload pending mesh data to the GPU using GlobalVertexAllocator. /// Upload pending mesh data to the GPU using GlobalVertexAllocator. 
pub fn upload(self: *ChunkMesh, allocator: *GlobalVertexAllocator) void { self.mutex.lock(); @@ -386,319 +268,3 @@ pub const ChunkMesh = struct { } } }; - -fn isEmittingSubchunk(axis: Face, s: i32, u: u32, v: u32, y_min: i32, y_max: i32) bool { - const y: i32 = switch (axis) { - .top => s, - .east => @as(i32, @intCast(u)) + y_min, - .south => @as(i32, @intCast(v)) + y_min, - else => unreachable, - }; - return y >= y_min and y < y_max; -} - -fn getBlocksAtBoundary(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, si: u32) [2]BlockType { - const y_off: i32 = @intCast(si * SUBCHUNK_SIZE); - return switch (axis) { - .top => .{ chunk.getBlockSafe(@intCast(u), s - 1, @intCast(v)), chunk.getBlockSafe(@intCast(u), s, @intCast(v)) }, - .east => .{ - getBlockCross(chunk, neighbors, s - 1, y_off + @as(i32, @intCast(u)), @intCast(v)), - getBlockCross(chunk, neighbors, s, y_off + @as(i32, @intCast(u)), @intCast(v)), - }, - .south => .{ - getBlockCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s - 1), - getBlockCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s), - }, - else => unreachable, - }; -} - -fn getBlockCross(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) BlockType { - if (x < 0) return if (neighbors.west) |w| w.getBlockSafe(CHUNK_SIZE_X - 1, y, z) else .air; - if (x >= CHUNK_SIZE_X) return if (neighbors.east) |e| e.getBlockSafe(0, y, z) else .air; - if (z < 0) return if (neighbors.north) |n| n.getBlockSafe(x, y, CHUNK_SIZE_Z - 1) else .air; - if (z >= CHUNK_SIZE_Z) return if (neighbors.south) |s| s.getBlockSafe(x, y, 0) else .air; - return chunk.getBlockSafe(x, y, z); -} - -fn getLightAtBoundary(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, si: u32) PackedLight { - const y_off: i32 = @intCast(si * SUBCHUNK_SIZE); - return switch (axis) { - .top => chunk.getLightSafe(@intCast(u), s, @intCast(v)), - .east => getLightCross(chunk, 
neighbors, s, y_off + @as(i32, @intCast(u)), @intCast(v)), - .south => getLightCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s), - else => unreachable, - }; -} - -fn getLightCross(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) PackedLight { - const MAX_LIGHT = @import("chunk.zig").MAX_LIGHT; - if (y >= CHUNK_SIZE_Y) return PackedLight.init(MAX_LIGHT, 0); - if (y < 0) return PackedLight.init(0, 0); - - if (x < 0) return if (neighbors.west) |w| w.getLightSafe(CHUNK_SIZE_X - 1, y, z) else PackedLight.init(MAX_LIGHT, 0); - if (x >= CHUNK_SIZE_X) return if (neighbors.east) |e| e.getLightSafe(0, y, z) else PackedLight.init(MAX_LIGHT, 0); - if (z < 0) return if (neighbors.north) |n| n.getLightSafe(x, y, CHUNK_SIZE_Z - 1) else PackedLight.init(MAX_LIGHT, 0); - if (z >= CHUNK_SIZE_Z) return if (neighbors.south) |s| s.getLightSafe(x, y, 0) else PackedLight.init(MAX_LIGHT, 0); - return chunk.getLightSafe(x, y, z); -} - -fn getAOAt(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) f32 { - if (y < 0 or y >= CHUNK_SIZE_Y) return 0; - - const b: BlockType = blk: { - if (x < 0) { - if (z < 0 or z >= CHUNK_SIZE_Z) break :blk .air; // Lack of diagonal neighbors - break :blk if (neighbors.west) |w| w.getBlock(CHUNK_SIZE_X - 1, @intCast(y), @intCast(z)) else .air; - } else if (x >= CHUNK_SIZE_X) { - if (z < 0 or z >= CHUNK_SIZE_Z) break :blk .air; - break :blk if (neighbors.east) |e| e.getBlock(0, @intCast(y), @intCast(z)) else .air; - } else if (z < 0) { - // x is already checked to be [0, CHUNK_SIZE_X-1] - break :blk if (neighbors.north) |n| n.getBlock(@intCast(x), @intCast(y), CHUNK_SIZE_Z - 1) else .air; - } else if (z >= CHUNK_SIZE_Z) { - break :blk if (neighbors.south) |s| s.getBlock(@intCast(x), @intCast(y), 0) else .air; - } else { - break :blk chunk.getBlock(@intCast(x), @intCast(y), @intCast(z)); - } - }; - - const b_def = block_registry.getBlockDefinition(b); - return if (b_def.is_solid and 
!b_def.is_transparent) 1.0 else 0.0; -} - -fn calculateVertexAO(s1: f32, s2: f32, c: f32) f32 { - if (s1 > 0.5 and s2 > 0.5) return 0.4; - return 1.0 - (s1 + s2 + c) * 0.2; -} - -fn addGreedyFace(allocator: std.mem.Allocator, verts: *std.ArrayListUnmanaged(Vertex), axis: Face, s: i32, u: u32, v: u32, w: u32, h: u32, block_def: *const block_registry.BlockDefinition, forward: bool, si: u32, light: PackedLight, tint: [3]f32, chunk: *const Chunk, neighbors: NeighborChunks, atlas: *const TextureAtlas) !void { - const face = if (forward) axis else switch (axis) { - .top => Face.bottom, - .east => Face.west, - .south => Face.north, - else => unreachable, - }; - const base_col = block_def.getFaceColor(face); - const col = [3]f32{ base_col[0] * tint[0], base_col[1] * tint[1], base_col[2] * tint[2] }; - const norm = face.getNormal(); - const nf = [3]f32{ @floatFromInt(norm[0]), @floatFromInt(norm[1]), @floatFromInt(norm[2]) }; - const tiles = atlas.getTilesForBlock(@intFromEnum(block_def.id)); - const tid: f32 = @floatFromInt(switch (face) { - .top => tiles.top, - .bottom => tiles.bottom, - else => tiles.side, - }); - const wf: f32 = @floatFromInt(w); - const hf: f32 = @floatFromInt(h); - const sf: f32 = @floatFromInt(s); - const uf: f32 = @floatFromInt(u); - const vf: f32 = @floatFromInt(v); - - var p: [4][3]f32 = undefined; - var uv: [4][2]f32 = undefined; - if (axis == .top) { - const y = sf; - if (forward) { - p[0] = .{ uf, y, vf + hf }; - p[1] = .{ uf + wf, y, vf + hf }; - p[2] = .{ uf + wf, y, vf }; - p[3] = .{ uf, y, vf }; - } else { - p[0] = .{ uf, y, vf }; - p[1] = .{ uf + wf, y, vf }; - p[2] = .{ uf + wf, y, vf + hf }; - p[3] = .{ uf, y, vf + hf }; - } - uv = [4][2]f32{ .{ 0, 0 }, .{ wf, 0 }, .{ wf, hf }, .{ 0, hf } }; - } else if (axis == .east) { - const x = sf; - const y0: f32 = @floatFromInt(si * SUBCHUNK_SIZE); - if (forward) { - p[0] = .{ x, y0 + uf, vf + hf }; - p[1] = .{ x, y0 + uf, vf }; - p[2] = .{ x, y0 + uf + wf, vf }; - p[3] = .{ x, y0 + uf + wf, vf + 
hf }; - } else { - p[0] = .{ x, y0 + uf, vf }; - p[1] = .{ x, y0 + uf, vf + hf }; - p[2] = .{ x, y0 + uf + wf, vf + hf }; - p[3] = .{ x, y0 + uf + wf, vf }; - } - uv = [4][2]f32{ .{ 0, wf }, .{ hf, wf }, .{ hf, 0 }, .{ 0, 0 } }; - } else { - const z = sf; - const y0: f32 = @floatFromInt(si * SUBCHUNK_SIZE); - if (forward) { - p[0] = .{ uf, y0 + vf, z }; - p[1] = .{ uf + wf, y0 + vf, z }; - p[2] = .{ uf + wf, y0 + vf + hf, z }; - p[3] = .{ uf, y0 + vf + hf, z }; - } else { - p[0] = .{ uf + wf, y0 + vf, z }; - p[1] = .{ uf, y0 + vf, z }; - p[2] = .{ uf, y0 + vf + hf, z }; - p[3] = .{ uf + wf, y0 + vf + hf, z }; - } - uv = [4][2]f32{ .{ 0, hf }, .{ wf, hf }, .{ wf, 0 }, .{ 0, 0 } }; - } - - // Calculate AO for each corner of the quad - var ao: [4]f32 = undefined; - for (0..4) |i| { - const vertex_pos = p[i]; - // Determine the three neighbor blocks to check for this vertex. - // We need to know which directions are 'outside' from this corner. - // We can find this by comparing the vertex position to the face center. 
- const center = [3]f32{ - (p[0][0] + p[2][0]) * 0.5, - (p[0][1] + p[2][1]) * 0.5, - (p[0][2] + p[2][2]) * 0.5, - }; - - const dir_x: i32 = if (vertex_pos[0] > center[0]) 0 else -1; - const dir_y: i32 = if (vertex_pos[1] > center[1]) 0 else -1; - const dir_z: i32 = if (vertex_pos[2] > center[2]) 0 else -1; - - const vx = @as(i32, @intFromFloat(@floor(vertex_pos[0]))); - const vy = @as(i32, @intFromFloat(@floor(vertex_pos[1]))); - const vz = @as(i32, @intFromFloat(@floor(vertex_pos[2]))); - - var s1: f32 = 0; - var s2: f32 = 0; - var c: f32 = 0; - - if (axis == .top) { - const y_off: i32 = if (forward) 0 else -1; - s1 = getAOAt(chunk, neighbors, vx + dir_x, vy + y_off, vz); - s2 = getAOAt(chunk, neighbors, vx, vy + y_off, vz + dir_z); - c = getAOAt(chunk, neighbors, vx + dir_x, vy + y_off, vz + dir_z); - } else if (axis == .east) { - const x_off: i32 = if (forward) 0 else -1; - s1 = getAOAt(chunk, neighbors, vx + x_off, vy + dir_y, vz); - s2 = getAOAt(chunk, neighbors, vx + x_off, vy, vz + dir_z); - c = getAOAt(chunk, neighbors, vx + x_off, vy + dir_y, vz + dir_z); - } else if (axis == .south) { - const z_off: i32 = if (forward) 0 else -1; - s1 = getAOAt(chunk, neighbors, vx + dir_x, vy, vz + z_off); - s2 = getAOAt(chunk, neighbors, vx, vy + dir_y, vz + z_off); - c = getAOAt(chunk, neighbors, vx + dir_x, vy + dir_y, vz + z_off); - } - - ao[i] = calculateVertexAO(s1, s2, c); - } - - // Choose triangle orientation to minimize AO artifacts (flipping the diagonal) - var idxs: [6]usize = undefined; - // Correct flipping: if A+C < B+D, then diagonal B-D is brighter. 
- if (ao[0] + ao[2] < ao[1] + ao[3]) { - idxs = .{ 1, 2, 3, 1, 3, 0 }; - } else { - idxs = .{ 0, 1, 2, 0, 2, 3 }; - } - - const sky_norm = @as(f32, @floatFromInt(light.getSkyLight())) / 15.0; - const block_norm = [3]f32{ - @as(f32, @floatFromInt(light.getBlockLightR())) / 15.0, - @as(f32, @floatFromInt(light.getBlockLightG())) / 15.0, - @as(f32, @floatFromInt(light.getBlockLightB())) / 15.0, - }; - - for (idxs) |i| { - try verts.append(allocator, Vertex{ - .pos = p[i], - .color = col, - .normal = nf, - .uv = uv[i], - .tile_id = tid, - .skylight = sky_norm, - .blocklight = block_norm, - .ao = ao[i], - }); - } -} - -fn getBiomeAt(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, z: i32) biome_mod.BiomeId { - if (x < 0) { - if (z >= 0 and z < CHUNK_SIZE_Z) { - if (neighbors.west) |w| return w.getBiome(CHUNK_SIZE_X - 1, @intCast(z)); - } - return chunk.getBiome(0, @intCast(std.math.clamp(z, 0, CHUNK_SIZE_Z - 1))); - } - if (x >= CHUNK_SIZE_X) { - if (z >= 0 and z < CHUNK_SIZE_Z) { - if (neighbors.east) |e| return e.getBiome(0, @intCast(z)); - } - return chunk.getBiome(CHUNK_SIZE_X - 1, @intCast(std.math.clamp(z, 0, CHUNK_SIZE_Z - 1))); - } - if (z < 0) { - if (neighbors.north) |n| return n.getBiome(@intCast(x), CHUNK_SIZE_Z - 1); - return chunk.getBiome(@intCast(x), 0); - } - if (z >= CHUNK_SIZE_Z) { - if (neighbors.south) |s| return s.getBiome(@intCast(x), 0); - return chunk.getBiome(@intCast(x), CHUNK_SIZE_Z - 1); - } - return chunk.getBiome(@intCast(x), @intCast(z)); -} - -/// Calculates the average color of the block's biome at the given face coordinates. -/// `s`, `u`, `v` are local coordinates on the slice plane (depending on `axis`). 
-fn getBlockColor(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, block: BlockType) [3]f32 { - // Only apply biome tint to top face of grass, and all faces of leaves/water - if (block == .grass) { - - // Grass: only tint the top face, sides and bottom get no tint - if (axis != .top) return .{ 1.0, 1.0, 1.0 }; - } else if (block != .leaves and block != .water) { - return .{ 1.0, 1.0, 1.0 }; - } - - var x: i32 = undefined; - var z: i32 = undefined; - - switch (axis) { - .top => { - x = @intCast(u); - z = @intCast(v); - }, - .east => { - x = s; - z = @intCast(v); - }, - .south => { - x = @intCast(u); - z = s; - }, - else => { - x = @intCast(u); - z = @intCast(v); - }, - } - - var r: f32 = 0; - var g: f32 = 0; - var b: f32 = 0; - var count: f32 = 0; - - var ox: i32 = -1; - while (ox <= 1) : (ox += 1) { - var oz: i32 = -1; - while (oz <= 1) : (oz += 1) { - const biome_id = getBiomeAt(chunk, neighbors, x + ox, z + oz); - const def = biome_mod.getBiomeDefinition(biome_id); - const col = switch (block) { - .grass => def.colors.grass, - .leaves => def.colors.foliage, - .water => def.colors.water, - else => .{ 1.0, 1.0, 1.0 }, - }; - r += col[0]; - g += col[1]; - b += col[2]; - count += 1.0; - } - } - - return .{ r / count, g / count, b / count }; -} diff --git a/src/world/lod_manager.zig b/src/world/lod_manager.zig index a2349f53..cfb7c109 100644 --- a/src/world/lod_manager.zig +++ b/src/world/lod_manager.zig @@ -11,6 +11,7 @@ //! - LOD0 generates last but gets priority in movement direction //! - Smooth transitions via fog masking //! +//! GPU operations are decoupled via LODGPUBridge and LODRenderInterface (Issue #246). 
const std = @import("std"); const lod_chunk = @import("lod_chunk.zig"); @@ -46,7 +47,12 @@ const RingBuffer = @import("../engine/core/ring_buffer.zig").RingBuffer; const Generator = @import("worldgen/generator_interface.zig").Generator; const LODMesh = @import("lod_mesh.zig").LODMesh; -const LODRenderer = @import("lod_renderer.zig").LODRenderer; + +const lod_gpu = @import("lod_upload_queue.zig"); +const LODGPUBridge = lod_gpu.LODGPUBridge; +const LODRenderInterface = lod_gpu.LODRenderInterface; +const MeshMap = lod_gpu.MeshMap; +const RegionMap = lod_gpu.RegionMap; const MAX_LOD_REGIONS = 2048; @@ -68,6 +74,7 @@ pub const LODStats = struct { memory_used_mb: u32 = 0, upgrades_pending: u32 = 0, downgrades_pending: u32 = 0, + upload_failures: u32 = 0, pub fn totalLoaded(self: *const LODStats) u32 { var total: u32 = 0; @@ -91,6 +98,7 @@ pub const LODStats = struct { self.memory_used_mb = 0; self.upgrades_pending = 0; self.downgrades_pending = 0; + self.upload_failures = 0; } pub fn recordState(self: *LODStats, lod_idx: usize, state: LODState) void { @@ -118,825 +126,817 @@ const LODTransition = struct { priority: i32, }; -/// Expected RHI interface for LODManager: -/// - createBuffer(size: usize, usage: BufferUsage) !BufferHandle -/// - destroyBuffer(handle: BufferHandle) void -/// - uploadBuffer(handle: BufferHandle, data: []const u8) !void -/// - waitIdle() void -/// - getFrameIndex() usize -/// - setModelMatrix(model: Mat4, color: Vec3, mask_radius: f32) void -/// - draw(handle: BufferHandle, count: u32, mode: DrawMode) void -/// -/// Main LOD Manager - coordinates all LOD levels -/// Generic over RHI type to allow mocking/DIP -pub fn LODManager(comptime RHI: type) type { - return struct { - const Self = @This(); +/// LOD Manager - coordinates all LOD levels. +/// Uses callback interfaces (LODGPUBridge, LODRenderInterface) for GPU operations +/// instead of a direct RHI dependency. 
+pub const LODManager = struct { + const Self = @This(); + + allocator: std.mem.Allocator, + config: ILODConfig, + + // Storage per LOD level (LOD0 uses existing World.chunks) + regions: [LODLevel.count]RegionMap, - allocator: std.mem.Allocator, - config: ILODConfig, + // Mesh storage per LOD level + meshes: [LODLevel.count]MeshMap, - // Storage per LOD level (LOD0 uses existing World.chunks) - regions: [LODLevel.count]std.HashMap(LODRegionKey, *LODChunk, LODRegionKeyContext, 80), + // Separate job queues per LOD level + // LOD3 queue processes first (fast), LOD0 queue last (slow but priority) + gen_queues: [LODLevel.count]*JobQueue, - // Mesh storage per LOD level - meshes: [LODLevel.count]std.HashMap(LODRegionKey, *LODMesh, LODRegionKeyContext, 80), + // Worker pool for LOD generation + lod_gen_pool: ?*WorkerPool, - // Separate job queues per LOD level - // LOD3 queue processes first (fast), LOD0 queue last (slow but priority) - gen_queues: [LODLevel.count]*JobQueue, + // Upload queues per LOD level + upload_queues: [LODLevel.count]RingBuffer(*LODChunk), - // Worker pool for LOD generation - lod_gen_pool: ?*WorkerPool, + // Transition queue for LOD upgrades/downgrades + transition_queue: std.ArrayListUnmanaged(LODTransition), - // Upload queues per LOD level - upload_queues: [LODLevel.count]RingBuffer(*LODChunk), + // Current player position (chunk coords) + player_cx: i32, + player_cz: i32, - // Transition queue for LOD upgrades/downgrades - transition_queue: std.ArrayListUnmanaged(LODTransition), + // Next job token + next_job_token: u32, - // Current player position (chunk coords) - player_cx: i32, - player_cz: i32, + // Stats + stats: LODStats, - // Next job token - next_job_token: u32, + // Mutex for thread safety + mutex: std.Thread.RwLock, - // Stats - stats: LODStats, + // GPU bridge for upload/destroy/sync operations (replaces direct RHI field) + gpu_bridge: LODGPUBridge, - // Mutex for thread safety - mutex: std.Thread.RwLock, + // Terrain generator for 
LOD generation (mutable for cache recentering) + generator: Generator, - // RHI for GPU operations - rhi: RHI, + // Paused state + paused: bool, - // Terrain generator for LOD generation (mutable for cache recentering) - generator: Generator, + // Memory tracking + memory_used_bytes: usize, - // Paused state - paused: bool, + // Performance tracking for throttling + update_tick: u32 = 0, - // Memory tracking - memory_used_bytes: usize, + // Deferred mesh deletion queue (Vulkan optimization) + deletion_queue: std.ArrayListUnmanaged(*LODMesh), + deletion_timer: f32 = 0, - // Performance tracking for throttling - update_tick: u32 = 0, + // Type-erased renderer interface (replaces direct LODRenderer(RHI) field) + renderer: LODRenderInterface, - // Deferred mesh deletion queue (Vulkan optimization) - deletion_queue: std.ArrayListUnmanaged(*LODMesh), - deletion_timer: f32 = 0, + // Callback type to check if a regular chunk is loaded and renderable + pub const ChunkChecker = lod_gpu.ChunkChecker; - renderer: *LODRenderer(RHI), + pub fn init(allocator: std.mem.Allocator, config: ILODConfig, gpu_bridge: LODGPUBridge, render_iface: LODRenderInterface, generator: Generator) !*Self { + const mgr = try allocator.create(Self); + errdefer allocator.destroy(mgr); - // Callback type to check if a regular chunk is loaded and renderable - pub const ChunkChecker = *const fn (chunk_x: i32, chunk_z: i32, ctx: *anyopaque) bool; + var regions: [LODLevel.count]RegionMap = undefined; + var meshes: [LODLevel.count]MeshMap = undefined; + var gen_queues: [LODLevel.count]*JobQueue = undefined; + var upload_queues: [LODLevel.count]RingBuffer(*LODChunk) = undefined; + var initialized_levels: usize = 0; - pub fn init(allocator: std.mem.Allocator, config: ILODConfig, rhi: RHI, generator: Generator) !*Self { - const mgr = try allocator.create(Self); + errdefer { + var i: usize = 0; + while (i < initialized_levels) : (i += 1) { + upload_queues[i].deinit(); + gen_queues[i].deinit(); + 
allocator.destroy(gen_queues[i]); + meshes[i].deinit(); + regions[i].deinit(); + } + } - var regions: [LODLevel.count]std.HashMap(LODRegionKey, *LODChunk, LODRegionKeyContext, 80) = undefined; - var meshes: [LODLevel.count]std.HashMap(LODRegionKey, *LODMesh, LODRegionKeyContext, 80) = undefined; - var gen_queues: [LODLevel.count]*JobQueue = undefined; - var upload_queues: [LODLevel.count]RingBuffer(*LODChunk) = undefined; + for (0..LODLevel.count) |i| { + var region_map = RegionMap.init(allocator); + errdefer region_map.deinit(); - for (0..LODLevel.count) |i| { - regions[i] = std.HashMap(LODRegionKey, *LODChunk, LODRegionKeyContext, 80).init(allocator); - meshes[i] = std.HashMap(LODRegionKey, *LODMesh, LODRegionKeyContext, 80).init(allocator); + var mesh_map = MeshMap.init(allocator); + errdefer mesh_map.deinit(); - const queue = try allocator.create(JobQueue); - queue.* = JobQueue.init(allocator); - gen_queues[i] = queue; + const queue = try allocator.create(JobQueue); + errdefer allocator.destroy(queue); + queue.* = JobQueue.init(allocator); + errdefer queue.deinit(); - upload_queues[i] = try RingBuffer(*LODChunk).init(allocator, 32); - } + var upload_queue = try RingBuffer(*LODChunk).init(allocator, 32); + errdefer upload_queue.deinit(); - const renderer = try LODRenderer(RHI).init(allocator, rhi); - - mgr.* = .{ - .allocator = allocator, - .config = config, - .regions = regions, - .meshes = meshes, - .gen_queues = gen_queues, - .lod_gen_pool = null, // Will be initialized below - .upload_queues = upload_queues, - .transition_queue = .empty, - .player_cx = 0, - .player_cz = 0, - .next_job_token = 1, - .stats = .{}, - .mutex = .{}, - .rhi = rhi, - .generator = generator, - .paused = false, - .memory_used_bytes = 0, - .update_tick = 0, - .deletion_queue = .empty, - .deletion_timer = 0, - .renderer = renderer, - }; - - // Initialize worker pool for LOD generation and meshing (3 workers for LOD tasks) - // All LOD jobs go to LOD3 queue in original code, we keep it 
consistent but use generic index - mgr.lod_gen_pool = try WorkerPool.init(allocator, 3, mgr.gen_queues[LODLevel.count - 1], mgr, processLODJob); - - const radii = config.getRadii(); - log.log.info("LODManager initialized with radii: LOD0={}, LOD1={}, LOD2={}, LOD3={}", .{ - radii[0], - radii[1], - radii[2], - radii[3], - }); - - return mgr; + regions[i] = region_map; + meshes[i] = mesh_map; + gen_queues[i] = queue; + upload_queues[i] = upload_queue; + initialized_levels += 1; } - pub fn deinit(self: *Self) void { - // Stop and cleanup queues - for (0..LODLevel.count) |i| { - self.gen_queues[i].stop(); - } + mgr.* = .{ + .allocator = allocator, + .config = config, + .regions = regions, + .meshes = meshes, + .gen_queues = gen_queues, + .lod_gen_pool = null, // Will be initialized below + .upload_queues = upload_queues, + .transition_queue = .empty, + .player_cx = 0, + .player_cz = 0, + .next_job_token = 1, + .stats = .{}, + .mutex = .{}, + .gpu_bridge = gpu_bridge, + .generator = generator, + .paused = false, + .memory_used_bytes = 0, + .update_tick = 0, + .deletion_queue = .empty, + .deletion_timer = 0, + .renderer = render_iface, + }; + + // Initialize worker pool for LOD generation and meshing (3 workers for LOD tasks) + // All LOD jobs go to LOD3 queue in original code, we keep it consistent but use generic index + mgr.lod_gen_pool = try WorkerPool.init(allocator, 3, mgr.gen_queues[LODLevel.count - 1], mgr, processLODJob); + + const radii = config.getRadii(); + log.log.info("LODManager initialized with radii: LOD0={}, LOD1={}, LOD2={}, LOD3={}", .{ + radii[0], + radii[1], + radii[2], + radii[3], + }); + + return mgr; + } - // Cleanup worker pool - if (self.lod_gen_pool) |pool| { - pool.deinit(); + pub fn deinit(self: *Self) void { + // Stop and cleanup queues + for (0..LODLevel.count) |i| { + self.gen_queues[i].stop(); + } + + // Cleanup worker pool + if (self.lod_gen_pool) |pool| { + pool.deinit(); + } + + for (0..LODLevel.count) |i| { + 
self.gen_queues[i].deinit(); + self.allocator.destroy(self.gen_queues[i]); + self.upload_queues[i].deinit(); + + // Cleanup meshes + var mesh_iter = self.meshes[i].iterator(); + while (mesh_iter.next()) |entry| { + self.gpu_bridge.destroy(entry.value_ptr.*); + self.allocator.destroy(entry.value_ptr.*); } + self.meshes[i].deinit(); - for (0..LODLevel.count) |i| { - self.gen_queues[i].deinit(); - self.allocator.destroy(self.gen_queues[i]); - self.upload_queues[i].deinit(); + // Cleanup regions + var region_iter = self.regions[i].iterator(); + while (region_iter.next()) |entry| { + entry.value_ptr.*.deinit(self.allocator); + self.allocator.destroy(entry.value_ptr.*); + } + self.regions[i].deinit(); + } - // Cleanup meshes - var mesh_iter = self.meshes[i].iterator(); - while (mesh_iter.next()) |entry| { - entry.value_ptr.*.deinit(self.rhi); - self.allocator.destroy(entry.value_ptr.*); - } - self.meshes[i].deinit(); + self.transition_queue.deinit(self.allocator); - // Cleanup regions - var region_iter = self.regions[i].iterator(); - while (region_iter.next()) |entry| { - entry.value_ptr.*.deinit(self.allocator); - self.allocator.destroy(entry.value_ptr.*); - } - self.regions[i].deinit(); + // Process any pending deletions + if (self.deletion_queue.items.len > 0) { + self.gpu_bridge.waitIdle(); + for (self.deletion_queue.items) |mesh| { + self.gpu_bridge.destroy(mesh); + self.allocator.destroy(mesh); } + } + self.deletion_queue.deinit(self.allocator); - self.transition_queue.deinit(self.allocator); + // NOTE: LODManager does NOT own the renderer lifetime. + // The renderer is owned by World and deinit'd there. 
- // Process any pending deletions + self.allocator.destroy(self); + } + + /// Update LOD system with player position + pub fn update(self: *Self, player_pos: Vec3, player_velocity: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque) !void { + if (self.paused) return; + + // Deferred deletion handling (Issue #119: Performance optimization) + // Clean up deleted meshes once per second to avoid waitIdle stalls + self.deletion_timer += 0.016; // Approx 60fps delta + if (self.deletion_timer >= 1.0 or self.deletion_queue.items.len > 50) { if (self.deletion_queue.items.len > 0) { - self.rhi.waitIdle(); + // Ensure GPU is done with resources before deleting + self.gpu_bridge.waitIdle(); for (self.deletion_queue.items) |mesh| { - mesh.deinit(self.rhi); + self.gpu_bridge.destroy(mesh); self.allocator.destroy(mesh); } + self.deletion_queue.clearRetainingCapacity(); } - self.deletion_queue.deinit(self.allocator); + self.deletion_timer = 0; + } - self.renderer.deinit(); + // Throttle heavy LOD management logic (generation queuing, state processing, unloads). + // LOD management involves iterating over thousands of potential regions and can + // take several milliseconds. Throttling to every 4 frames (approx 15Hz at 60fps) + // significantly reduces CPU overhead while remaining responsive to player movement. 
+ self.update_tick += 1; + if (self.update_tick % 4 != 0) return; - self.allocator.destroy(self); + // Issue #211: Clean up LOD chunks that are fully covered by LOD0 (throttled) + if (chunk_checker) |checker| { + self.unloadLODWhereChunksLoaded(checker, checker_ctx.?); } - /// Update LOD system with player position - pub fn update(self: *Self, player_pos: Vec3, player_velocity: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque) !void { - if (self.paused) return; - - // Deferred deletion handling (Issue #119: Performance optimization) - // Clean up deleted meshes once per second to avoid waitIdle stalls - self.deletion_timer += 0.016; // Approx 60fps delta - if (self.deletion_timer >= 1.0 or self.deletion_queue.items.len > 50) { - if (self.deletion_queue.items.len > 0) { - // Ensure GPU is done with resources before deleting - self.rhi.waitIdle(); - for (self.deletion_queue.items) |mesh| { - mesh.deinit(self.rhi); - self.allocator.destroy(mesh); - } - self.deletion_queue.clearRetainingCapacity(); - } - self.deletion_timer = 0; - } + // Safety: Check for NaN/Inf player position + if (!std.math.isFinite(player_pos.x) or !std.math.isFinite(player_pos.z)) return; + + const pc = worldToChunk(@as(i32, @intFromFloat(player_pos.x)), @as(i32, @intFromFloat(player_pos.z))); + self.player_cx = pc.chunk_x; + self.player_cz = pc.chunk_z; + + // Issue #119 Phase 4: Recenter classification cache if player moved far enough. + // This ensures LOD chunks have cache coverage for consistent biome/surface data. 
+ const player_wx: i32 = @intFromFloat(player_pos.x); + const player_wz: i32 = @intFromFloat(player_pos.z); + _ = self.generator.maybeRecenterCache(player_wx, player_wz); + + // Queue LOD regions that need loading (also queue on first frame) + // Priority: LOD3 first (fast, fills horizon), then LOD2, LOD1 + // We iterate backwards from LODLevel.count-1 down to 1 + var i: usize = LODLevel.count - 1; + while (i > 0) : (i -= 1) { + try self.queueLODRegions(@enumFromInt(@as(u3, @intCast(i))), player_velocity, chunk_checker, checker_ctx); + } - // Throttle heavy LOD management logic (generation queuing, state processing, unloads). - // LOD management involves iterating over thousands of potential regions and can - // take several milliseconds. Throttling to every 4 frames (approx 15Hz at 60fps) - // significantly reduces CPU overhead while remaining responsive to player movement. - self.update_tick += 1; - if (self.update_tick % 4 != 0) return; + // Process state transitions + try self.processStateTransitions(); - // Issue #211: Clean up LOD chunks that are fully covered by LOD0 (throttled) - if (chunk_checker) |checker| { - self.unloadLODWhereChunksLoaded(checker, checker_ctx.?); - } + // Process uploads (limited per frame) + self.processUploads(); - // Safety: Check for NaN/Inf player position - if (!std.math.isFinite(player_pos.x) or !std.math.isFinite(player_pos.z)) return; - - const pc = worldToChunk(@as(i32, @intFromFloat(player_pos.x)), @as(i32, @intFromFloat(player_pos.z))); - self.player_cx = pc.chunk_x; - self.player_cz = pc.chunk_z; - - // Issue #119 Phase 4: Recenter classification cache if player moved far enough. - // This ensures LOD chunks have cache coverage for consistent biome/surface data. 
- const player_wx: i32 = @intFromFloat(player_pos.x); - const player_wz: i32 = @intFromFloat(player_pos.z); - _ = self.generator.maybeRecenterCache(player_wx, player_wz); - - // Queue LOD regions that need loading (also queue on first frame) - // Priority: LOD3 first (fast, fills horizon), then LOD2, LOD1 - // We iterate backwards from LODLevel.count-1 down to 1 - var i: usize = LODLevel.count - 1; - while (i > 0) : (i -= 1) { - try self.queueLODRegions(@enumFromInt(@as(u3, @intCast(i))), player_velocity, chunk_checker, checker_ctx); - } + // Update stats + self.updateStats(); - // Process state transitions - try self.processStateTransitions(); + // Unload distant regions + try self.unloadDistantRegions(); + } - // Process uploads (limited per frame) - self.processUploads(); + /// Queue LOD regions that need generation + fn queueLODRegions(self: *Self, lod: LODLevel, velocity: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque) !void { + const radii = self.config.getRadii(); + const radius = radii[@intFromEnum(lod)]; - // Update stats - self.updateStats(); + // Skip LOD0 - handled by existing World system + if (lod == .lod0) return; - // Unload distant regions - try self.unloadDistantRegions(); - } + var queued_count: u32 = 0; - /// Queue LOD regions that need generation - fn queueLODRegions(self: *Self, lod: LODLevel, velocity: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque) !void { - const radii = self.config.getRadii(); - const radius = radii[@intFromEnum(lod)]; + const scale: i32 = @intCast(lod.chunksPerSide()); + const region_radius = @divFloor(radius, scale) + 1; - // Skip LOD0 - handled by existing World system - if (lod == .lod0) return; + const player_rx = @divFloor(self.player_cx, scale); + const player_rz = @divFloor(self.player_cz, scale); - var queued_count: u32 = 0; + self.mutex.lock(); + defer self.mutex.unlock(); - const scale: i32 = @intCast(lod.chunksPerSide()); - const region_radius = @divFloor(radius, scale) + 1; + const 
storage = &self.regions[@intFromEnum(lod)]; - const player_rx = @divFloor(self.player_cx, scale); - const player_rz = @divFloor(self.player_cz, scale); + // All LOD jobs go to LOD3 queue (worker pool processes from there) + // We encode the actual LOD level in the dist_sq high bits + const queue = self.gen_queues[LODLevel.count - 1]; + const lod_bits: i32 = @as(i32, @intCast(@intFromEnum(lod))) << 28; - self.mutex.lock(); - defer self.mutex.unlock(); - - const storage = &self.regions[@intFromEnum(lod)]; - - // All LOD jobs go to LOD3 queue (worker pool processes from there) - // We encode the actual LOD level in the dist_sq high bits - const queue = self.gen_queues[LODLevel.count - 1]; - const lod_bits: i32 = @as(i32, @intCast(@intFromEnum(lod))) << 28; - - // Calculate velocity direction for priority - const vel_len = @sqrt(velocity.x * velocity.x + velocity.z * velocity.z); - const has_velocity = vel_len > 0.1; - const vel_dx: f32 = if (has_velocity) velocity.x / vel_len else 0; - const vel_dz: f32 = if (has_velocity) velocity.z / vel_len else 0; - - var rz = player_rz - region_radius; - while (rz <= player_rz + region_radius) : (rz += 1) { - var rx = player_rx - region_radius; - while (rx <= player_rx + region_radius) : (rx += 1) { - // Check circular distance to avoid thrashing corner chunks - const dx = rx - player_rx; - const dz = rz - player_rz; - if (@as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz) > @as(i64, region_radius) * @as(i64, region_radius)) continue; - - const key = LODRegionKey{ .rx = rx, .rz = rz, .lod = lod }; - - // Check if region is covered by higher detail chunks - if (chunk_checker) |checker| { - // We use a temporary chunk to calculate bounds - const temp_chunk = LODChunk.init(rx, rz, lod); - if (self.areAllChunksLoaded(temp_chunk.worldBounds(), checker, checker_ctx.?)) { - continue; - } + // Calculate velocity direction for priority + const vel_len = @sqrt(velocity.x * velocity.x + velocity.z * velocity.z); + const has_velocity 
= vel_len > 0.1; + const vel_dx: f32 = if (has_velocity) velocity.x / vel_len else 0; + const vel_dz: f32 = if (has_velocity) velocity.z / vel_len else 0; + + var rz = player_rz - region_radius; + while (rz <= player_rz + region_radius) : (rz += 1) { + var rx = player_rx - region_radius; + while (rx <= player_rx + region_radius) : (rx += 1) { + // Check circular distance to avoid thrashing corner chunks + const dx = rx - player_rx; + const dz = rz - player_rz; + if (@as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz) > @as(i64, region_radius) * @as(i64, region_radius)) continue; + + const key = LODRegionKey{ .rx = rx, .rz = rz, .lod = lod }; + + // Check if region is covered by higher detail chunks + if (chunk_checker) |checker| { + // We use a temporary chunk to calculate bounds + const temp_chunk = LODChunk.init(rx, rz, lod); + if (self.areAllChunksLoaded(temp_chunk.worldBounds(), checker, checker_ctx.?)) { + continue; } + } - // Check if region exists and what state it's in - const existing = storage.get(key); - const needs_queue = if (existing) |chunk| - // Re-queue if stuck in missing state - chunk.state == .missing - else - // Queue if doesn't exist - true; - - if (needs_queue) { - queued_count += 1; - - // Reuse existing chunk or create new one - const chunk = if (existing) |c| c else blk: { - const c = try self.allocator.create(LODChunk); - c.* = LODChunk.init(rx, rz, lod); - try storage.put(key, c); - break :blk c; - }; + // Check if region exists and what state it's in + const existing = storage.get(key); + const needs_queue = if (existing) |chunk| + // Re-queue if stuck in missing state + chunk.state == .missing + else + // Queue if doesn't exist + true; + + if (needs_queue) { + queued_count += 1; + + // Reuse existing chunk or create new one + const chunk = if (existing) |c| c else blk: { + const c = try self.allocator.create(LODChunk); + c.* = LODChunk.init(rx, rz, lod); + try storage.put(key, c); + break :blk c; + }; - chunk.job_token = 
self.next_job_token; - self.next_job_token += 1; - - // Calculate velocity-weighted priority - // (dx, dz calculated above) - const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); - // Scale priority to match chunk-distance units used by meshing jobs (which are prioritized by chunk dist) - // This ensures generation doesn't starve meshing - const priority_full = dist_sq * @as(i64, scale) * @as(i64, scale); - var priority: i32 = @as(i32, @intCast(@min(priority_full, 0x0FFFFFFF))); - if (has_velocity) { - const fdx: f32 = @floatFromInt(dx); - const fdz: f32 = @floatFromInt(dz); - const dist = @sqrt(fdx * fdx + fdz * fdz); - if (dist > 0.01) { - const dot = (fdx * vel_dx + fdz * vel_dz) / dist; - // Ahead = lower priority number, behind = higher - const weight = 1.0 - dot * 0.5; - priority = @intFromFloat(@as(f32, @floatFromInt(priority)) * weight); - } + chunk.job_token = self.next_job_token; + self.next_job_token += 1; + + // Calculate velocity-weighted priority + // (dx, dz calculated above) + const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); + // Scale priority to match chunk-distance units used by meshing jobs (which are prioritized by chunk dist) + // This ensures generation doesn't starve meshing + const priority_full = dist_sq * @as(i64, scale) * @as(i64, scale); + var priority: i32 = @as(i32, @intCast(@min(priority_full, 0x0FFFFFFF))); + if (has_velocity) { + const fdx: f32 = @floatFromInt(dx); + const fdz: f32 = @floatFromInt(dz); + const dist = @sqrt(fdx * fdx + fdz * fdz); + if (dist > 0.01) { + const dot = (fdx * vel_dx + fdz * vel_dz) / dist; + // Ahead = lower priority number, behind = higher + const weight = 1.0 - dot * 0.5; + priority = @intFromFloat(@as(f32, @floatFromInt(priority)) * weight); } + } - // Encode LOD level in high bits of dist_sq - const encoded_priority = (priority & 0x0FFFFFFF) | lod_bits; - - // Queue for generation - try queue.push(.{ - .type = .chunk_generation, - .dist_sq = 
encoded_priority, - .data = .{ - .chunk = .{ - .x = rx, // Using chunk coords for region coords - .z = rz, - .job_token = chunk.job_token, - }, + // Encode LOD level in high bits of dist_sq + const encoded_priority = (priority & 0x0FFFFFFF) | lod_bits; + + // Queue for generation + try queue.push(.{ + .type = .chunk_generation, + .dist_sq = encoded_priority, + .data = .{ + .chunk = .{ + .x = rx, // Using chunk coords for region coords + .z = rz, + .job_token = chunk.job_token, }, - }); - chunk.state = .generating; // Mark as generating, not queued_for_generation - } + }, + }); + chunk.state = .generating; // Mark as generating, not queued_for_generation } } } + } - /// Process state transitions (generated -> meshing -> ready) - fn processStateTransitions(self: *Self) !void { - // Use exclusive lock since we modify chunk state - self.mutex.lock(); - defer self.mutex.unlock(); - - for (1..LODLevel.count) |i| { - const lod = @as(LODLevel, @enumFromInt(@as(u3, @intCast(i)))); - var iter = self.regions[i].iterator(); - while (iter.next()) |entry| { - const chunk = entry.value_ptr.*; - if (chunk.state == .generated) { - const scale = @as(i32, @intCast(lod.chunksPerSide())); - const dx = chunk.region_x * scale - self.player_cx; - const dz = chunk.region_z * scale - self.player_cz; - const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); - const lod_bits = @as(i32, @intCast(i)) << 28; - - chunk.state = .meshing; - try self.gen_queues[LODLevel.count - 1].push(.{ - .type = .chunk_meshing, - // Encode LOD level in high bits of dist_sq - .dist_sq = @as(i32, @truncate(dist_sq & 0x0FFFFFFF)) | lod_bits, - .data = .{ - .chunk = .{ - .x = chunk.region_x, - .z = chunk.region_z, - .job_token = chunk.job_token, - }, + /// Process state transitions (generated -> meshing -> ready) + fn processStateTransitions(self: *Self) !void { + // Use exclusive lock since we modify chunk state + self.mutex.lock(); + defer self.mutex.unlock(); + + for (1..LODLevel.count) |i| { + 
const lod = @as(LODLevel, @enumFromInt(@as(u3, @intCast(i)))); + var iter = self.regions[i].iterator(); + while (iter.next()) |entry| { + const chunk = entry.value_ptr.*; + if (chunk.state == .generated) { + const scale = @as(i32, @intCast(lod.chunksPerSide())); + const dx = chunk.region_x * scale - self.player_cx; + const dz = chunk.region_z * scale - self.player_cz; + const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); + const lod_bits = @as(i32, @intCast(i)) << 28; + + chunk.state = .meshing; + try self.gen_queues[LODLevel.count - 1].push(.{ + .type = .chunk_meshing, + // Encode LOD level in high bits of dist_sq + .dist_sq = @as(i32, @truncate(dist_sq & 0x0FFFFFFF)) | lod_bits, + .data = .{ + .chunk = .{ + .x = chunk.region_x, + .z = chunk.region_z, + .job_token = chunk.job_token, }, - }); - } else if (chunk.state == .mesh_ready) { - chunk.state = .uploading; - try self.upload_queues[i].push(chunk); - } + }, + }); + } else if (chunk.state == .mesh_ready) { + chunk.state = .uploading; + try self.upload_queues[i].push(chunk); } } } + } - /// Process GPU uploads (limited per frame) - fn processUploads(self: *Self) void { - // Use exclusive lock since we modify chunk state (chunk.state = .renderable) - self.mutex.lock(); - defer self.mutex.unlock(); - - const max_uploads = self.config.getMaxUploadsPerFrame(); - var uploads: u32 = 0; - - // Process from highest LOD down (furthest, should be ready first) - var i: usize = LODLevel.count - 1; - while (i > 0) : (i -= 1) { - while (!self.upload_queues[i].isEmpty() and uploads < max_uploads) { - if (self.upload_queues[i].pop()) |chunk| { - // Upload mesh to GPU - const key = LODRegionKey{ - .rx = chunk.region_x, - .rz = chunk.region_z, - .lod = chunk.lod_level, + /// Process GPU uploads (limited per frame) + fn processUploads(self: *Self) void { + // Use exclusive lock since we modify chunk state (chunk.state = .renderable) + self.mutex.lock(); + defer self.mutex.unlock(); + + const max_uploads = 
self.config.getMaxUploadsPerFrame(); + var uploads: u32 = 0; + + // Process from highest LOD down (furthest, should be ready first) + var i: usize = LODLevel.count - 1; + while (i > 0) : (i -= 1) { + while (!self.upload_queues[i].isEmpty() and uploads < max_uploads) { + if (self.upload_queues[i].pop()) |chunk| { + // Upload mesh to GPU via bridge callback + const key = LODRegionKey{ + .rx = chunk.region_x, + .rz = chunk.region_z, + .lod = chunk.lod_level, + }; + if (self.meshes[i].get(key)) |mesh| { + self.gpu_bridge.upload(mesh) catch |err| { + log.log.warn("LOD{} mesh upload failed (will retry): {}", .{ i, err }); + self.stats.upload_failures += 1; + chunk.state = .mesh_ready; // Revert to allow retry + continue; }; - if (self.meshes[i].get(key)) |mesh| { - mesh.upload(self.rhi) catch |err| { - log.log.err("Failed to upload LOD{} mesh: {}", .{ i, err }); - continue; - }; - } - chunk.state = .renderable; - uploads += 1; } + chunk.state = .renderable; + uploads += 1; } } } + } - /// Unload regions that are too far from player - fn unloadDistantRegions(self: *Self) !void { - const radii = self.config.getRadii(); - for (1..LODLevel.count) |i| { - try self.unloadDistantForLevel(@enumFromInt(@as(u3, @intCast(i))), radii[i]); - } + /// Unload regions that are too far from player + fn unloadDistantRegions(self: *Self) !void { + const radii = self.config.getRadii(); + for (1..LODLevel.count) |i| { + try self.unloadDistantForLevel(@enumFromInt(@as(u3, @intCast(i))), radii[i]); } + } - fn unloadDistantForLevel(self: *Self, lod: LODLevel, max_radius: i32) !void { - _ = max_radius; // Interface provides current radii - const radii = self.config.getRadii(); - const lod_radius = radii[@intFromEnum(lod)]; - const storage = &self.regions[@intFromEnum(lod)]; + fn unloadDistantForLevel(self: *Self, lod: LODLevel, max_radius: i32) !void { + _ = max_radius; // Interface provides current radii + const radii = self.config.getRadii(); + const lod_radius = radii[@intFromEnum(lod)]; + 
const storage = &self.regions[@intFromEnum(lod)]; - const scale: i32 = @intCast(lod.chunksPerSide()); - const player_rx = @divFloor(self.player_cx, scale); - const player_rz = @divFloor(self.player_cz, scale); + const scale: i32 = @intCast(lod.chunksPerSide()); + const player_rx = @divFloor(self.player_cx, scale); + const player_rz = @divFloor(self.player_cz, scale); - // Use same +1 buffer as queuing to match radius exactly - const region_radius = @divFloor(lod_radius, scale) + 1; + // Use same +1 buffer as queuing to match radius exactly + const region_radius = @divFloor(lod_radius, scale) + 1; - var to_remove = std.ArrayListUnmanaged(LODRegionKey).empty; - defer to_remove.deinit(self.allocator); + var to_remove = std.ArrayListUnmanaged(LODRegionKey).empty; + defer to_remove.deinit(self.allocator); - // Hold lock for entire operation to prevent races with worker threads - self.mutex.lock(); - defer self.mutex.unlock(); + // Hold lock for entire operation to prevent races with worker threads + self.mutex.lock(); + defer self.mutex.unlock(); - var iter = storage.iterator(); - while (iter.next()) |entry| { - const key = entry.key_ptr.*; - const chunk = entry.value_ptr.*; + var iter = storage.iterator(); + while (iter.next()) |entry| { + const key = entry.key_ptr.*; + const chunk = entry.value_ptr.*; - const dx = key.rx - player_rx; - const dz = key.rz - player_rz; + const dx = key.rx - player_rx; + const dz = key.rz - player_rz; - const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); - const rad_sq = @as(i64, region_radius) * @as(i64, region_radius); + const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); + const rad_sq = @as(i64, region_radius) * @as(i64, region_radius); - if (dist_sq > rad_sq) { - if (!chunk.isPinned() and - chunk.state != .generating and - chunk.state != .meshing and - chunk.state != .uploading) - { - try to_remove.append(self.allocator, key); - } + if (dist_sq > rad_sq) { + if (!chunk.isPinned() and + 
chunk.state != .generating and + chunk.state != .meshing and + chunk.state != .uploading) + { + try to_remove.append(self.allocator, key); } } + } - // Remove after iteration (still under lock) - if (to_remove.items.len > 0) { - for (to_remove.items) |key| { - if (storage.get(key)) |chunk| { - // Clean up mesh before removing chunk - const meshes = &self.meshes[@intFromEnum(lod)]; - if (meshes.get(key)) |mesh| { - // Push to deferred deletion queue instead of deleting immediately - self.deletion_queue.append(self.allocator, mesh) catch { - // Fallback if allocation fails: delete immediately (slow but safe) - mesh.deinit(self.rhi); - self.allocator.destroy(mesh); - }; - _ = meshes.remove(key); - } - - chunk.deinit(self.allocator); - self.allocator.destroy(chunk); - _ = storage.remove(key); + // Remove after iteration (still under lock) + if (to_remove.items.len > 0) { + for (to_remove.items) |key| { + if (storage.get(key)) |chunk| { + // Clean up mesh before removing chunk + const meshes = &self.meshes[@intFromEnum(lod)]; + if (meshes.get(key)) |mesh| { + // Push to deferred deletion queue instead of deleting immediately + self.deletion_queue.append(self.allocator, mesh) catch { + // Fallback if allocation fails: delete immediately (slow but safe) + self.gpu_bridge.destroy(mesh); + self.allocator.destroy(mesh); + }; + _ = meshes.remove(key); } + + chunk.deinit(self.allocator); + self.allocator.destroy(chunk); + _ = storage.remove(key); } } } + } - /// Update statistics - fn updateStats(self: *Self) void { - self.stats.reset(); - var mem_usage: usize = 0; - - self.mutex.lockShared(); - defer self.mutex.unlockShared(); - - for (0..LODLevel.count) |i| { - var iter = self.regions[i].iterator(); - while (iter.next()) |entry| { - const chunk = entry.value_ptr.*; - self.stats.recordState(i, chunk.state); + /// Update statistics + fn updateStats(self: *Self) void { + self.stats.reset(); + var mem_usage: usize = 0; - // Calculate actual memory usage for this chunk's data - 
switch (chunk.data) { - .simplified => |*s| { - mem_usage += s.totalMemoryBytes(); - }, - else => {}, - } - } + self.mutex.lockShared(); + defer self.mutex.unlockShared(); - // Add mesh memory - var mesh_iter = self.meshes[i].iterator(); - while (mesh_iter.next()) |entry| { - mem_usage += entry.value_ptr.*.capacity * @sizeOf(Vertex); + for (0..LODLevel.count) |i| { + var iter = self.regions[i].iterator(); + while (iter.next()) |entry| { + const chunk = entry.value_ptr.*; + self.stats.recordState(i, chunk.state); + + // Calculate actual memory usage for this chunk's data + switch (chunk.data) { + .simplified => |*s| { + mem_usage += s.totalMemoryBytes(); + }, + else => {}, } } - self.stats.addMemory(mem_usage); - self.memory_used_bytes = mem_usage; + // Add mesh memory + var mesh_iter = self.meshes[i].iterator(); + while (mesh_iter.next()) |entry| { + mem_usage += entry.value_ptr.*.capacity * @sizeOf(Vertex); + } } - /// Get current statistics - pub fn getStats(self: *Self) LODStats { - return self.stats; - } + self.stats.addMemory(mem_usage); + self.memory_used_bytes = mem_usage; + } - /// Pause all LOD generation - pub fn pause(self: *Self) void { - self.paused = true; - for (0..LODLevel.count) |i| { - self.gen_queues[i].setPaused(true); - } - } + /// Get current statistics + pub fn getStats(self: *Self) LODStats { + return self.stats; + } - /// Resume LOD generation - pub fn unpause(self: *Self) void { - self.paused = false; - for (0..LODLevel.count) |i| { - self.gen_queues[i].setPaused(false); - } + /// Pause all LOD generation + pub fn pause(self: *Self) void { + self.paused = true; + for (0..LODLevel.count) |i| { + self.gen_queues[i].setPaused(true); } + } - /// Get LOD level for a given chunk distance - pub fn getLODForDistance(self: *const Self, chunk_x: i32, chunk_z: i32) LODLevel { - const dx = chunk_x - self.player_cx; - const dz = chunk_z - self.player_cz; - const dist = @max(@abs(dx), @abs(dz)); - return self.config.getLODForDistance(dist); + /// Resume 
LOD generation + pub fn unpause(self: *Self) void { + self.paused = false; + for (0..LODLevel.count) |i| { + self.gen_queues[i].setPaused(false); } + } - /// Check if a position is within LOD range - pub fn isInRange(self: *const Self, chunk_x: i32, chunk_z: i32) bool { - const dx = chunk_x - self.player_cx; - const dz = chunk_z - self.player_cz; - const dist = @max(@abs(dx), @abs(dz)); - return self.config.isInRange(dist); - } + /// Get LOD level for a given chunk distance + pub fn getLODForDistance(self: *const Self, chunk_x: i32, chunk_z: i32) LODLevel { + const dx = chunk_x - self.player_cx; + const dz = chunk_z - self.player_cz; + const dist = @max(@abs(dx), @abs(dz)); + return self.config.getLODForDistance(dist); + } - /// Render all LOD meshes - /// chunk_checker: Optional callback to check if regular chunks cover this region. - /// If all chunks in region are loaded, the LOD region is skipped. - /// - /// NOTE: Acquires a shared lock on LODManager. LODRenderer must NOT attempt to acquire - /// a write lock on LODManager during rendering to avoid deadlocks. 
- pub fn render(self: *Self, view_proj: Mat4, camera_pos: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque, use_frustum: bool) void { - self.mutex.lockShared(); - defer self.mutex.unlockShared(); - - self.renderer.render(self, view_proj, camera_pos, chunk_checker, checker_ctx, use_frustum); - } + /// Check if a position is within LOD range + pub fn isInRange(self: *const Self, chunk_x: i32, chunk_z: i32) bool { + const dx = chunk_x - self.player_cx; + const dz = chunk_z - self.player_cz; + const dist = @max(@abs(dx), @abs(dz)); + return self.config.isInRange(dist); + } - /// Free LOD meshes where all underlying chunks are loaded - pub fn unloadLODWhereChunksLoaded(self: *Self, checker: ChunkChecker, ctx: *anyopaque) void { - // Lock exclusive because we modify meshes and regions maps - self.mutex.lock(); - defer self.mutex.unlock(); + /// Render all LOD meshes + /// chunk_checker: Optional callback to check if regular chunks cover this region. + /// If all chunks in region are loaded, the LOD region is skipped. + /// + /// NOTE: Acquires a shared lock on LODManager. LODRenderer must NOT attempt to acquire + /// a write lock on LODManager during rendering to avoid deadlocks. 
+ pub fn render(self: *Self, view_proj: Mat4, camera_pos: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque, use_frustum: bool) void { + self.mutex.lockShared(); + defer self.mutex.unlockShared(); + + self.renderer.render(&self.meshes, &self.regions, self.config, view_proj, camera_pos, chunk_checker, checker_ctx, use_frustum); + } - for (1..LODLevel.count) |i| { - const storage = &self.regions[i]; - const meshes = &self.meshes[i]; + /// Free LOD meshes where all underlying chunks are loaded + pub fn unloadLODWhereChunksLoaded(self: *Self, checker: ChunkChecker, ctx: *anyopaque) void { + // Lock exclusive because we modify meshes and regions maps + self.mutex.lock(); + defer self.mutex.unlock(); - var to_remove = std.ArrayListUnmanaged(LODRegionKey).empty; - defer to_remove.deinit(self.allocator); + for (1..LODLevel.count) |i| { + const storage = &self.regions[i]; + const meshes = &self.meshes[i]; - var iter = meshes.iterator(); - while (iter.next()) |entry| { - if (storage.get(entry.key_ptr.*)) |chunk| { - // Don't unload if being processed (pinned) or not ready - if (chunk.isPinned() or chunk.state == .generating or chunk.state == .meshing or chunk.state == .uploading) continue; + var to_remove = std.ArrayListUnmanaged(LODRegionKey).empty; + defer to_remove.deinit(self.allocator); - const bounds = chunk.worldBounds(); - if (self.areAllChunksLoaded(bounds, checker, ctx)) { - to_remove.append(self.allocator, entry.key_ptr.*) catch {}; - } + var iter = meshes.iterator(); + while (iter.next()) |entry| { + if (storage.get(entry.key_ptr.*)) |chunk| { + // Don't unload if being processed (pinned) or not ready + if (chunk.isPinned() or chunk.state == .generating or chunk.state == .meshing or chunk.state == .uploading) continue; + + const bounds = chunk.worldBounds(); + if (self.areAllChunksLoaded(bounds, checker, ctx)) { + to_remove.append(self.allocator, entry.key_ptr.*) catch {}; } } + } - for (to_remove.items) |rem_key| { - if (meshes.fetchRemove(rem_key)) 
|mesh_entry| { - // Queue for deferred deletion to avoid waitIdle stutter - self.deletion_queue.append(self.allocator, mesh_entry.value) catch { - mesh_entry.value.deinit(self.rhi); - self.allocator.destroy(mesh_entry.value); - }; - } - if (storage.fetchRemove(rem_key)) |chunk_entry| { - chunk_entry.value.deinit(self.allocator); - self.allocator.destroy(chunk_entry.value); - } + for (to_remove.items) |rem_key| { + if (meshes.fetchRemove(rem_key)) |mesh_entry| { + // Queue for deferred deletion to avoid waitIdle stutter + self.deletion_queue.append(self.allocator, mesh_entry.value) catch { + self.gpu_bridge.destroy(mesh_entry.value); + self.allocator.destroy(mesh_entry.value); + }; + } + if (storage.fetchRemove(rem_key)) |chunk_entry| { + chunk_entry.value.deinit(self.allocator); + self.allocator.destroy(chunk_entry.value); } } } + } - /// Check if all chunks within the given world bounds are loaded and renderable - pub fn areAllChunksLoaded(self: *Self, bounds: LODChunk.WorldBounds, checker: ChunkChecker, ctx: *anyopaque) bool { - _ = self; - // Convert world bounds to chunk coordinates - const min_cx = @divFloor(bounds.min_x, CHUNK_SIZE_X); - const min_cz = @divFloor(bounds.min_z, CHUNK_SIZE_X); - const max_cx = @divFloor(bounds.max_x - 1, CHUNK_SIZE_X); // -1 because max is exclusive - const max_cz = @divFloor(bounds.max_z - 1, CHUNK_SIZE_X); - - // Check every chunk in the region - var cz = min_cz; - while (cz <= max_cz) : (cz += 1) { - var cx = min_cx; - while (cx <= max_cx) : (cx += 1) { - if (!checker(cx, cz, ctx)) { - return false; // At least one chunk is not loaded - } + /// Check if all chunks within the given world bounds are loaded and renderable + pub fn areAllChunksLoaded(self: *Self, bounds: LODChunk.WorldBounds, checker: ChunkChecker, ctx: *anyopaque) bool { + _ = self; + // Convert world bounds to chunk coordinates + const min_cx = @divFloor(bounds.min_x, CHUNK_SIZE_X); + const min_cz = @divFloor(bounds.min_z, CHUNK_SIZE_X); + const max_cx = 
@divFloor(bounds.max_x - 1, CHUNK_SIZE_X); // -1 because max is exclusive + const max_cz = @divFloor(bounds.max_z - 1, CHUNK_SIZE_X); + + // Check every chunk in the region + var cz = min_cz; + while (cz <= max_cz) : (cz += 1) { + var cx = min_cx; + while (cx <= max_cx) : (cx += 1) { + if (!checker(cx, cz, ctx)) { + return false; // At least one chunk is not loaded } } - return true; // All chunks are loaded } + return true; // All chunks are loaded + } - /// Get or create mesh for a LOD region - fn getOrCreateMesh(self: *Self, key: LODRegionKey) !*LODMesh { - self.mutex.lock(); - defer self.mutex.unlock(); - - const lod_idx = @intFromEnum(key.lod); - if (lod_idx == 0 or lod_idx >= LODLevel.count) return error.InvalidLODLevel; + /// Get or create mesh for a LOD region + fn getOrCreateMesh(self: *Self, key: LODRegionKey) !*LODMesh { + self.mutex.lock(); + defer self.mutex.unlock(); - const meshes = &self.meshes[lod_idx]; + const lod_idx = @intFromEnum(key.lod); + if (lod_idx == 0 or lod_idx >= LODLevel.count) return error.InvalidLODLevel; - if (meshes.get(key)) |mesh| { - return mesh; - } + const meshes = &self.meshes[lod_idx]; - const mesh = try self.allocator.create(LODMesh); - mesh.* = LODMesh.init(self.allocator, key.lod); - try meshes.put(key, mesh); + if (meshes.get(key)) |mesh| { return mesh; } - /// Build mesh for an LOD chunk (called after generation completes) - fn buildMeshForChunk(self: *Self, chunk: *LODChunk) !void { - const key = LODRegionKey{ - .rx = chunk.region_x, - .rz = chunk.region_z, - .lod = chunk.lod_level, - }; - - const mesh = try self.getOrCreateMesh(key); + const mesh = try self.allocator.create(LODMesh); + mesh.* = LODMesh.init(self.allocator, key.lod); + try meshes.put(key, mesh); + return mesh; + } - // Access chunk.data under shared lock - the data is read-only during meshing - // and the chunk is pinned, so we just need to ensure visibility - self.mutex.lockShared(); - defer self.mutex.unlockShared(); + /// Build mesh for an LOD 
chunk (called after generation completes) + fn buildMeshForChunk(self: *Self, chunk: *LODChunk) !void { + const key = LODRegionKey{ + .rx = chunk.region_x, + .rz = chunk.region_z, + .lod = chunk.lod_level, + }; - switch (chunk.data) { - .simplified => |*data| { - const bounds = chunk.worldBounds(); - try mesh.buildFromSimplifiedData(data, bounds.min_x, bounds.min_z); - }, - .full => { - // LOD0 meshes handled by World, not LODManager - }, - .empty => { - // No data to build mesh from - }, - } + const mesh = try self.getOrCreateMesh(key); + + // Access chunk.data under shared lock - the data is read-only during meshing + // and the chunk is pinned, so we just need to ensure visibility + self.mutex.lockShared(); + defer self.mutex.unlockShared(); + + switch (chunk.data) { + .simplified => |*data| { + const bounds = chunk.worldBounds(); + try mesh.buildFromSimplifiedData(data, bounds.min_x, bounds.min_z); + }, + .full => { + // LOD0 meshes handled by World, not LODManager + }, + .empty => { + // No data to build mesh from + }, } + } - /// Worker pool callback for LOD tasks (generation and meshing) - fn processLODJob(ctx: *anyopaque, job: Job) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - - // Determine which LOD level this job is for based on encoded priority - const lod_level: LODLevel = @enumFromInt(@as(u3, @intCast((job.dist_sq >> 28) & 0x7))); - const key = LODRegionKey{ - .rx = job.data.chunk.x, - .rz = job.data.chunk.z, - .lod = lod_level, - }; - - const lod_idx = @intFromEnum(lod_level); - if (lod_idx == 0) { - return; - } + /// Worker pool callback for LOD tasks (generation and meshing) + fn processLODJob(ctx: *anyopaque, job: Job) void { + const self: *Self = @ptrCast(@alignCast(ctx)); - // Phase 1: Acquire lock, validate job, pin chunk - self.mutex.lock(); - const storage = &self.regions[lod_idx]; - - const chunk = storage.get(key) orelse { - self.mutex.unlock(); - return; - }; - - // Stale job check (too far from player) - const scale: i32 = 
@intCast(lod_level.chunksPerSide()); - const player_rx = @divFloor(self.player_cx, scale); - const player_rz = @divFloor(self.player_cz, scale); - const dx = job.data.chunk.x - player_rx; - const dz = job.data.chunk.z - player_rz; - const radii = self.config.getRadii(); - const radius = radii[lod_idx]; - const region_radius = @divFloor(radius, scale) + 2; + // Determine which LOD level this job is for based on encoded priority + const lod_level: LODLevel = @enumFromInt(@as(u3, @intCast((job.dist_sq >> 28) & 0x7))); + const key = LODRegionKey{ + .rx = job.data.chunk.x, + .rz = job.data.chunk.z, + .lod = lod_level, + }; - const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); - const rad_sq = @as(i64, region_radius) * @as(i64, region_radius); + const lod_idx = @intFromEnum(lod_level); + if (lod_idx == 0) { + return; + } - if (dist_sq > rad_sq) { - if (chunk.state == .generating or chunk.state == .meshing) { - chunk.state = .missing; - } - self.mutex.unlock(); - return; - } + // Phase 1: Acquire lock, validate job, pin chunk + self.mutex.lock(); + const storage = &self.regions[lod_idx]; - // Skip if token mismatch - if (chunk.job_token != job.data.chunk.job_token) { - self.mutex.unlock(); - return; - } + const chunk = storage.get(key) orelse { + self.mutex.unlock(); + return; + }; - // Check state and capture job type before releasing lock - const current_state = chunk.state; - const job_type = job.type; + // Stale job check (too far from player) + const scale: i32 = @intCast(lod_level.chunksPerSide()); + const player_rx = @divFloor(self.player_cx, scale); + const player_rz = @divFloor(self.player_cz, scale); + const dx = job.data.chunk.x - player_rx; + const dz = job.data.chunk.z - player_rz; + const radii = self.config.getRadii(); + const radius = radii[lod_idx]; + const region_radius = @divFloor(radius, scale) + 2; + + const dist_sq = @as(i64, dx) * @as(i64, dx) + @as(i64, dz) * @as(i64, dz); + const rad_sq = @as(i64, region_radius) * @as(i64, 
region_radius); + + if (dist_sq > rad_sq) { + if (chunk.state == .generating or chunk.state == .meshing) { + chunk.state = .missing; + } + self.mutex.unlock(); + return; + } - // Validate state matches expected for job type - const valid_state = switch (job_type) { - .chunk_generation => current_state == .generating, - .chunk_meshing => current_state == .meshing, - else => false, - }; + // Skip if token mismatch + if (chunk.job_token != job.data.chunk.job_token) { + self.mutex.unlock(); + return; + } - if (!valid_state) { - self.mutex.unlock(); - return; - } + // Check state and capture job type before releasing lock + const current_state = chunk.state; + const job_type = job.type; - // Check if we need to generate data (while still holding lock) - const needs_data_init = (job_type == .chunk_generation and chunk.data != .simplified); + // Validate state matches expected for job type + const valid_state = switch (job_type) { + .chunk_generation => current_state == .generating, + .chunk_meshing => current_state == .meshing, + else => false, + }; - // Pin chunk during operation (prevents unload) - chunk.pin(); + if (!valid_state) { self.mutex.unlock(); + return; + } - // Phase 2: Do expensive work without lock - var success = false; - var new_state: LODState = .missing; - - switch (job_type) { - .chunk_generation => { - // Initialize simplified data if needed - if (needs_data_init) { - var data = LODSimplifiedData.init(self.allocator, lod_level) catch { - new_state = .missing; - chunk.unpin(); - // Acquire lock briefly to update state - self.mutex.lock(); - chunk.state = new_state; - self.mutex.unlock(); - return; - }; + // Check if we need to generate data (while still holding lock) + const needs_data_init = (job_type == .chunk_generation and chunk.data != .simplified); - // Generate heightmap data (expensive, done without lock) - self.generator.generateHeightmapOnly(&data, chunk.region_x, chunk.region_z, lod_level); + // Pin chunk during operation (prevents unload) 
+ chunk.pin(); + self.mutex.unlock(); - // Acquire lock to update chunk data - self.mutex.lock(); - chunk.data = .{ .simplified = data }; - self.mutex.unlock(); - } - success = true; - new_state = .generated; - }, - .chunk_meshing => { - // Build mesh (expensive, done without lock) - // Note: buildMeshForChunk -> getOrCreateMesh acquires its own lock - self.buildMeshForChunk(chunk) catch |err| { - log.log.err("Failed to build LOD{} async mesh: {}", .{ @intFromEnum(lod_level), err }); - new_state = .generated; // Retry later + // Phase 2: Do expensive work without lock + var success = false; + var new_state: LODState = .missing; + + switch (job_type) { + .chunk_generation => { + // Initialize simplified data if needed + if (needs_data_init) { + var data = LODSimplifiedData.init(self.allocator, lod_level) catch { + new_state = .missing; chunk.unpin(); // Acquire lock briefly to update state self.mutex.lock(); @@ -944,57 +944,60 @@ pub fn LODManager(comptime RHI: type) type { self.mutex.unlock(); return; }; - success = true; - new_state = .mesh_ready; - }, - else => unreachable, - } - chunk.unpin(); + // Generate heightmap data (expensive, done without lock) + self.generator.generateHeightmapOnly(&data, chunk.region_x, chunk.region_z, lod_level); - // Phase 3: Acquire lock briefly to update state - if (success) { - self.mutex.lock(); - // Re-verify token hasn't changed while we were working - if (chunk.job_token == job.data.chunk.job_token) { - chunk.state = new_state; + // Acquire lock to update chunk data + self.mutex.lock(); + chunk.data = .{ .simplified = data }; + self.mutex.unlock(); } - self.mutex.unlock(); + success = true; + new_state = .generated; + }, + .chunk_meshing => { + // Build mesh (expensive, done without lock) + // Note: buildMeshForChunk -> getOrCreateMesh acquires its own lock + self.buildMeshForChunk(chunk) catch |err| { + log.log.err("Failed to build LOD{} async mesh: {}", .{ @intFromEnum(lod_level), err }); + new_state = .generated; // Retry 
later + chunk.unpin(); + // Acquire lock briefly to update state + self.mutex.lock(); + chunk.state = new_state; + self.mutex.unlock(); + return; + }; + success = true; + new_state = .mesh_ready; + }, + else => unreachable, + } + + chunk.unpin(); + + // Phase 3: Acquire lock briefly to update state + if (success) { + self.mutex.lock(); + // Re-verify token hasn't changed while we were working + if (chunk.job_token == job.data.chunk.job_token) { + chunk.state = new_state; } + self.mutex.unlock(); } - }; -} + } +}; // Tests test "LODManager initialization" { const allocator = std.testing.allocator; - const MockRHIState = struct { + const MockState = struct { buffer_created: bool = false, buffer_destroyed: bool = false, }; - // Mock RHI for testing - const MockRHI = struct { - state: *MockRHIState, - - pub fn createBuffer(self: @This(), _: usize, _: anytype) !u32 { - self.state.buffer_created = true; - return 1; - } - pub fn destroyBuffer(self: @This(), _: u32) void { - self.state.buffer_destroyed = true; - } - pub fn getFrameIndex(_: @This()) usize { - return 0; - } - // Needed by LODManager - pub fn waitIdle(_: @This()) void {} - // Needed by LODRenderer - pub fn setModelMatrix(_: @This(), _: Mat4, _: Vec3, _: f32) void {} - pub fn draw(_: @This(), _: u32, _: u32, _: anytype) void {} - }; - const MockGenerator = struct { fn generate(_: *anyopaque, _: *Chunk, _: ?*const bool) void {} fn generateHeightmapOnly(_: *anyopaque, _: *LODSimplifiedData, _: i32, _: i32, _: LODLevel) void {} @@ -1035,15 +1038,36 @@ test "LODManager initialization" { .radii = .{ 8, 16, 32, 64 }, }; - // Test that we can instantiate the generic manager with MockRHI - var mock_state = MockRHIState{}; - const mock_rhi = MockRHI{ .state = &mock_state }; + // Create mock GPU bridge + var mock_state = MockState{}; + const mock_bridge = LODGPUBridge{ + .on_upload = struct { + fn f(_: *LODMesh, _: *anyopaque) rhi_types.RhiError!void {} + }.f, + .on_destroy = struct { + fn f(_: *LODMesh, ctx: 
*anyopaque) void { + const state: *MockState = @ptrCast(@alignCast(ctx)); + state.buffer_destroyed = true; + } + }.f, + .on_wait_idle = struct { + fn f(_: *anyopaque) void {} + }.f, + .ctx = @ptrCast(&mock_state), + }; - const Manager = LODManager(MockRHI); - var mgr = try Manager.init(allocator, config.interface(), mock_rhi, mock_gen); + // Create mock render interface + const mock_render = LODRenderInterface{ + .render_fn = struct { + fn f(_: *anyopaque, _: *const [LODLevel.count]MeshMap, _: *const [LODLevel.count]RegionMap, _: ILODConfig, _: Mat4, _: Vec3, _: ?LODManager.ChunkChecker, _: ?*anyopaque, _: bool) void {} + }.f, + .deinit_fn = struct { + fn f(_: *anyopaque) void {} + }.f, + .ptr = @ptrCast(&mock_state), + }; - // Verify init called createBuffer (via LODRenderer) - try std.testing.expect(mock_state.buffer_created); + var mgr = try LODManager.init(allocator, config.interface(), mock_bridge, mock_render, mock_gen); // Verify initial state const stats = mgr.getStats(); @@ -1052,8 +1076,8 @@ test "LODManager initialization" { mgr.deinit(); - // Verify deinit called destroyBuffer - try std.testing.expect(mock_state.buffer_destroyed); + // NOTE: LODManager does NOT call renderer.deinit() - renderer lifetime is + // owned by the caller (World). This is tested in the integration test below. 
// Check config values try std.testing.expectEqual(LODLevel.lod0, config.getLODForDistance(5)); @@ -1065,21 +1089,6 @@ test "LODManager initialization" { test "LODManager end-to-end covered cleanup" { const allocator = std.testing.allocator; - // Mock setup - const MockRHI = struct { - pub fn createBuffer(_: @This(), _: usize, _: anytype) !u32 { - return 1; - } - pub fn destroyBuffer(_: @This(), _: u32) void {} - pub fn uploadBuffer(_: @This(), _: u32, _: []const u8) !void {} - pub fn getFrameIndex(_: @This()) usize { - return 0; - } - pub fn waitIdle(_: @This()) void {} - pub fn setModelMatrix(_: @This(), _: Mat4, _: Vec3, _: f32) void {} - pub fn draw(_: @This(), _: u32, _: u32, _: anytype) void {} - }; - const MockGenerator = struct { fn generate(_: *anyopaque, _: *Chunk, _: ?*const bool) void {} fn generateHeightmapOnly(_: *anyopaque, _: *LODSimplifiedData, _: i32, _: i32, _: LODLevel) void {} @@ -1119,8 +1128,33 @@ test "LODManager end-to-end covered cleanup" { .radii = .{ 2, 4, 8, 16 }, }; - const Manager = LODManager(MockRHI); - var mgr = try Manager.init(allocator, config.interface(), .{}, mock_gen); + // Create mock GPU bridge (no-op). Use a real pointer to satisfy debug assertions. + var noop_ctx: u8 = 0; + const mock_bridge = LODGPUBridge{ + .on_upload = struct { + fn f(_: *LODMesh, _: *anyopaque) rhi_types.RhiError!void {} + }.f, + .on_destroy = struct { + fn f(_: *LODMesh, _: *anyopaque) void {} + }.f, + .on_wait_idle = struct { + fn f(_: *anyopaque) void {} + }.f, + .ctx = @ptrCast(&noop_ctx), + }; + + // Create mock render interface (no-op). Use a real pointer. 
+ const mock_render = LODRenderInterface{ + .render_fn = struct { + fn f(_: *anyopaque, _: *const [LODLevel.count]MeshMap, _: *const [LODLevel.count]RegionMap, _: ILODConfig, _: Mat4, _: Vec3, _: ?LODManager.ChunkChecker, _: ?*anyopaque, _: bool) void {} + }.f, + .deinit_fn = struct { + fn f(_: *anyopaque) void {} + }.f, + .ptr = @ptrCast(&noop_ctx), + }; + + var mgr = try LODManager.init(allocator, config.interface(), mock_bridge, mock_render, mock_gen); defer mgr.deinit(); // 1. Initial position at origin diff --git a/src/world/lod_renderer.zig b/src/world/lod_renderer.zig index 1ba95ecb..29db8f23 100644 --- a/src/world/lod_renderer.zig +++ b/src/world/lod_renderer.zig @@ -11,6 +11,13 @@ const LODRegionKeyContext = lod_chunk.LODRegionKeyContext; const LODMesh = @import("lod_mesh.zig").LODMesh; const CHUNK_SIZE_X = @import("chunk.zig").CHUNK_SIZE_X; +const lod_gpu = @import("lod_upload_queue.zig"); +const LODGPUBridge = lod_gpu.LODGPUBridge; +const LODRenderInterface = lod_gpu.LODRenderInterface; +const MeshMap = lod_gpu.MeshMap; +const RegionMap = lod_gpu.RegionMap; +const ChunkChecker = lod_gpu.ChunkChecker; + const Vec3 = @import("../engine/math/vec3.zig").Vec3; const Mat4 = @import("../engine/math/mat4.zig").Mat4; const Frustum = @import("../engine/math/frustum.zig").Frustum; @@ -22,6 +29,7 @@ const log = @import("../engine/core/log.zig"); /// - createBuffer(size: usize, usage: BufferUsage) !BufferHandle /// - destroyBuffer(handle: BufferHandle) void /// - getFrameIndex() usize +/// - setLODInstanceBuffer(handle: BufferHandle) void /// - setModelMatrix(model: Mat4, color: Vec3, mask_radius: f32) void /// - draw(handle: BufferHandle, count: u32, mode: DrawMode) void pub fn LODRenderer(comptime RHI: type) type { @@ -70,22 +78,21 @@ pub fn LODRenderer(comptime RHI: type) type { self.allocator.destroy(self); } - /// Render all LOD meshes + /// Render all LOD meshes using explicitly provided data. 
pub fn render( self: *Self, - manager: anytype, + meshes: *const [LODLevel.count]MeshMap, + regions: *const [LODLevel.count]RegionMap, + config: ILODConfig, view_proj: Mat4, camera_pos: Vec3, - chunk_checker: ?*const fn (i32, i32, *anyopaque) bool, + chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque, use_frustum: bool, ) void { // Update frame index self.frame_index = self.rhi.getFrameIndex(); - self.instance_data.clearRetainingCapacity(); - self.draw_list.clearRetainingCapacity(); - // Set LOD mode on RHI self.rhi.setLODInstanceBuffer(self.instance_buffers[self.frame_index]); @@ -99,7 +106,7 @@ pub fn LODRenderer(comptime RHI: type) type { // Process from highest LOD down var i: usize = LODLevel.count - 1; while (i > 0) : (i -= 1) { - self.collectVisibleMeshes(manager, &manager.meshes[i], &manager.regions[i], view_proj, camera_pos, frustum, lod_y_offset, chunk_checker, checker_ctx, use_frustum) catch |err| { + self.collectVisibleMeshes(&meshes[i], ®ions[i], config, view_proj, camera_pos, frustum, lod_y_offset, chunk_checker, checker_ctx, use_frustum) catch |err| { log.log.err("Failed to collect visible meshes for LOD{}: {}", .{ i, err }); }; } @@ -115,14 +122,14 @@ pub fn LODRenderer(comptime RHI: type) type { fn collectVisibleMeshes( self: *Self, - manager: anytype, - meshes: anytype, - regions: anytype, + meshes: *const MeshMap, + regions: *const RegionMap, + config: ILODConfig, view_proj: Mat4, camera_pos: Vec3, frustum: Frustum, lod_y_offset: f32, - chunk_checker: ?*const fn (i32, i32, *anyopaque) bool, + chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque, use_frustum: bool, ) !void { @@ -163,7 +170,7 @@ pub fn LODRenderer(comptime RHI: type) type { const model = Mat4.translate(Vec3.init(@as(f32, @floatFromInt(bounds.min_x)) - camera_pos.x, -camera_pos.y + lod_y_offset, @as(f32, @floatFromInt(bounds.min_z)) - camera_pos.z)); - const mask_radius = manager.config.calculateMaskRadius() * @as(f32, @floatFromInt(CHUNK_SIZE_X)); + const mask_radius = 
config.calculateMaskRadius() * @as(f32, @floatFromInt(CHUNK_SIZE_X)); try self.instance_data.append(self.allocator, .{ .view_proj = view_proj, .model = model, @@ -174,6 +181,59 @@ pub fn LODRenderer(comptime RHI: type) type { } } } + + /// Create a LODGPUBridge that delegates to this renderer's RHI. + pub fn createGPUBridge(self: *Self) LODGPUBridge { + const Wrapper = struct { + fn onUpload(mesh: *LODMesh, ctx: *anyopaque) rhi_types.RhiError!void { + const rhi: *RHI = @ptrCast(@alignCast(ctx)); + return mesh.upload(rhi.*); + } + fn onDestroy(mesh: *LODMesh, ctx: *anyopaque) void { + const rhi: *RHI = @ptrCast(@alignCast(ctx)); + mesh.deinit(rhi.*); + } + fn onWaitIdle(ctx: *anyopaque) void { + const rhi: *RHI = @ptrCast(@alignCast(ctx)); + rhi.waitIdle(); + } + }; + return .{ + .on_upload = Wrapper.onUpload, + .on_destroy = Wrapper.onDestroy, + .on_wait_idle = Wrapper.onWaitIdle, + .ctx = @ptrCast(&self.rhi), + }; + } + + /// Create a type-erased LODRenderInterface from this renderer. + pub fn toInterface(self: *Self) LODRenderInterface { + const Wrapper = struct { + fn renderFn( + self_ptr: *anyopaque, + meshes: *const [LODLevel.count]MeshMap, + regions: *const [LODLevel.count]RegionMap, + config: ILODConfig, + view_proj: Mat4, + camera_pos: Vec3, + chunk_checker: ?ChunkChecker, + checker_ctx: ?*anyopaque, + use_frustum: bool, + ) void { + const renderer: *Self = @ptrCast(@alignCast(self_ptr)); + renderer.render(meshes, regions, config, view_proj, camera_pos, chunk_checker, checker_ctx, use_frustum); + } + fn deinitFn(self_ptr: *anyopaque) void { + const renderer: *Self = @ptrCast(@alignCast(self_ptr)); + renderer.deinit(); + } + }; + return .{ + .render_fn = Wrapper.renderFn, + .deinit_fn = Wrapper.deinitFn, + .ptr = self, + }; + } }; } @@ -268,10 +328,6 @@ test "LODRenderer render draw path" { var chunk = LODChunk.init(0, 0, .lod1); chunk.state = .renderable; - // Create mock manager with meshes and regions - const MeshMap = std.HashMap(LODRegionKey, *LODMesh, 
LODRegionKeyContext, 80); - const RegionMap = std.HashMap(LODRegionKey, *LODChunk, LODRegionKeyContext, 80); - var meshes: [LODLevel.count]MeshMap = undefined; var regions: [LODLevel.count]RegionMap = undefined; for (0..LODLevel.count) |i| { @@ -290,31 +346,15 @@ test "LODRenderer render draw path" { try meshes[1].put(key, &mesh); try regions[1].put(key, &chunk); - const MockManager = struct { - meshes: *[LODLevel.count]MeshMap, - regions: *[LODLevel.count]RegionMap, - config: ILODConfig, - - pub fn unloadLODWhereChunksLoaded(_: @This(), _: anytype, _: anytype) void {} - pub fn areAllChunksLoaded(_: @This(), _: anytype, _: anytype, _: anytype) bool { - return false; // Not loaded, so LOD should render - } - }; - var mock_config = LODConfig{}; - const mock_manager = MockManager{ - .meshes = &meshes, - .regions = ®ions, - .config = mock_config.interface(), - }; // Create view-projection matrix that includes origin (where our chunk is) // Use identity for simplicity - frustum will include everything const view_proj = Mat4.identity; const camera_pos = Vec3.zero; - // Call render - renderer.render(mock_manager, view_proj, camera_pos, null, null, true); + // Call render with explicit parameters + renderer.render(&meshes, ®ions, mock_config.interface(), view_proj, camera_pos, null, null, true); // Verify draw was called with correct parameters try std.testing.expectEqual(@as(u32, 1), mock_state.draw_calls); @@ -322,3 +362,102 @@ test "LODRenderer render draw path" { try std.testing.expectEqual(@as(u32, 42), mock_state.last_buffer_handle); try std.testing.expectEqual(@as(u32, 100), mock_state.last_vertex_count); } + +test "LODRenderer createGPUBridge and toInterface round-trip" { + const allocator = std.testing.allocator; + + const MockRHIState = struct { + upload_calls: u32 = 0, + destroy_calls: u32 = 0, + wait_idle_calls: u32 = 0, + draw_calls: u32 = 0, + set_matrix_calls: u32 = 0, + }; + + const MockRHI = struct { + state: *MockRHIState, + + pub fn createBuffer(self: 
@This(), _: usize, _: anytype) !u32 { + _ = self; + return 1; + } + pub fn destroyBuffer(self: @This(), _: u32) void { + self.state.destroy_calls += 1; + } + pub fn uploadBuffer(self: @This(), _: u32, _: []const u8) !void { + self.state.upload_calls += 1; + } + pub fn getFrameIndex(_: @This()) usize { + return 0; + } + pub fn waitIdle(self: @This()) void { + self.state.wait_idle_calls += 1; + } + pub fn setModelMatrix(self: @This(), _: Mat4, _: Vec3, _: f32) void { + self.state.set_matrix_calls += 1; + } + pub fn setLODInstanceBuffer(_: @This(), _: anytype) void {} + pub fn draw(self: @This(), _: u32, _: u32, _: anytype) void { + self.state.draw_calls += 1; + } + }; + + var mock_state = MockRHIState{}; + const mock_rhi = MockRHI{ .state = &mock_state }; + + const Renderer = LODRenderer(MockRHI); + const renderer = try Renderer.init(allocator, mock_rhi); + defer renderer.deinit(); + + // Test createGPUBridge round-trip + const bridge = renderer.createGPUBridge(); + + // Verify bridge.waitIdle calls through to MockRHI.waitIdle + bridge.waitIdle(); + try std.testing.expectEqual(@as(u32, 1), mock_state.wait_idle_calls); + + // Verify bridge.destroy calls through to MockRHI.destroyBuffer (via LODMesh.deinit) + var test_mesh = LODMesh.init(allocator, .lod1); + test_mesh.buffer_handle = 99; + bridge.destroy(&test_mesh); + try std.testing.expectEqual(@as(u32, 1), mock_state.destroy_calls); + try std.testing.expectEqual(@as(u32, 0), test_mesh.buffer_handle); // deinit zeroes handle + + // Test toInterface round-trip: render through type-erased interface + const iface = renderer.toInterface(); + + // Set up meshes/regions with a renderable chunk + var meshes: [LODLevel.count]MeshMap = undefined; + var regions: [LODLevel.count]RegionMap = undefined; + for (0..LODLevel.count) |i| { + meshes[i] = MeshMap.init(allocator); + regions[i] = RegionMap.init(allocator); + } + defer { + for (0..LODLevel.count) |i| { + meshes[i].deinit(); + regions[i].deinit(); + } + } + + var mesh = 
LODMesh.init(allocator, .lod1); + mesh.buffer_handle = 42; + mesh.vertex_count = 50; + mesh.ready = true; + + var chunk = LODChunk.init(0, 0, .lod1); + chunk.state = .renderable; + + const key = LODRegionKey{ .rx = 0, .rz = 0, .lod = .lod1 }; + try meshes[1].put(key, &mesh); + try regions[1].put(key, &chunk); + + var mock_config = LODConfig{}; + + // Render through the type-erased interface + iface.render(&meshes, &regions, mock_config.interface(), Mat4.identity, Vec3.zero, null, null, true); + + // Verify the real renderer's draw was invoked through the interface + try std.testing.expectEqual(@as(u32, 1), mock_state.draw_calls); + try std.testing.expectEqual(@as(u32, 1), mock_state.set_matrix_calls); +} diff --git a/src/world/lod_upload_queue.zig b/src/world/lod_upload_queue.zig new file mode 100644 index 00000000..d292ff03 --- /dev/null +++ b/src/world/lod_upload_queue.zig @@ -0,0 +1,110 @@ +//! LOD GPU Bridge - callback interfaces that decouple LOD logic from GPU operations. +//! +//! Extracted from LODManager (Issue #246) to satisfy Single Responsibility Principle. +//! LODManager uses these interfaces instead of holding a direct RHI reference. + +const std = @import("std"); +const lod_chunk = @import("lod_chunk.zig"); +const LODLevel = lod_chunk.LODLevel; +const LODChunk = lod_chunk.LODChunk; +const LODRegionKey = lod_chunk.LODRegionKey; +const LODRegionKeyContext = lod_chunk.LODRegionKeyContext; +const ILODConfig = lod_chunk.ILODConfig; +const LODMesh = @import("lod_mesh.zig").LODMesh; +const Vec3 = @import("../engine/math/vec3.zig").Vec3; +const Mat4 = @import("../engine/math/mat4.zig").Mat4; +const rhi_types = @import("../engine/graphics/rhi_types.zig"); +const RhiError = rhi_types.RhiError; + +/// Callback interface for GPU data operations (upload, destroy, sync). +/// Created by the caller who owns the concrete RHI, passed to LODManager. +pub const LODGPUBridge = struct { +    /// Upload pending vertex data for a mesh to GPU buffers. 
+ on_upload: *const fn (mesh: *LODMesh, ctx: *anyopaque) RhiError!void, + /// Destroy GPU resources owned by a mesh. + on_destroy: *const fn (mesh: *LODMesh, ctx: *anyopaque) void, + /// Wait for GPU to finish all pending work (needed before batch deletion). + on_wait_idle: *const fn (ctx: *anyopaque) void, + /// Opaque context pointer (typically the concrete RHI instance). + ctx: *anyopaque, + + fn hasInvalidCtx(self: LODGPUBridge) bool { + const ctx_addr = @intFromPtr(self.ctx); + return ctx_addr == 0 or ctx_addr == 0xaaaa_aaaa_aaaa_aaaa; + } + + /// Validate that ctx is not undefined/null. + fn assertValidCtx(self: LODGPUBridge) void { + std.debug.assert(!self.hasInvalidCtx()); + } + + pub fn upload(self: LODGPUBridge, mesh: *LODMesh) RhiError!void { + if (self.hasInvalidCtx()) return error.InvalidState; + self.assertValidCtx(); + return self.on_upload(mesh, self.ctx); + } + + pub fn destroy(self: LODGPUBridge, mesh: *LODMesh) void { + if (self.hasInvalidCtx()) { + std.log.err("LODGPUBridge.destroy called with invalid context pointer", .{}); + return; + } + self.assertValidCtx(); + self.on_destroy(mesh, self.ctx); + } + + pub fn waitIdle(self: LODGPUBridge) void { + if (self.hasInvalidCtx()) { + std.log.err("LODGPUBridge.waitIdle called with invalid context pointer", .{}); + return; + } + self.assertValidCtx(); + self.on_wait_idle(self.ctx); + } +}; + +/// Type aliases used by LODRenderInterface for mesh/region maps. +pub const MeshMap = std.HashMap(LODRegionKey, *LODMesh, LODRegionKeyContext, 80); +pub const RegionMap = std.HashMap(LODRegionKey, *LODChunk, LODRegionKeyContext, 80); + +/// Callback type to check if a regular chunk is loaded and renderable. +pub const ChunkChecker = *const fn (chunk_x: i32, chunk_z: i32, ctx: *anyopaque) bool; + +/// Type-erased interface for LOD rendering. +/// Allows LODManager to delegate rendering without knowing the concrete RHI type. +pub const LODRenderInterface = struct { + /// Render LOD meshes using the provided data. 
+ render_fn: *const fn ( + self_ptr: *anyopaque, + meshes: *const [LODLevel.count]MeshMap, + regions: *const [LODLevel.count]RegionMap, + config: ILODConfig, + view_proj: Mat4, + camera_pos: Vec3, + chunk_checker: ?ChunkChecker, + checker_ctx: ?*anyopaque, + use_frustum: bool, + ) void, + /// Destroy renderer resources. + deinit_fn: *const fn (self_ptr: *anyopaque) void, + /// Opaque pointer to the concrete renderer. + ptr: *anyopaque, + + pub fn render( + self: LODRenderInterface, + meshes: *const [LODLevel.count]MeshMap, + regions: *const [LODLevel.count]RegionMap, + config: ILODConfig, + view_proj: Mat4, + camera_pos: Vec3, + chunk_checker: ?ChunkChecker, + checker_ctx: ?*anyopaque, + use_frustum: bool, + ) void { + self.render_fn(self.ptr, meshes, regions, config, view_proj, camera_pos, chunk_checker, checker_ctx, use_frustum); + } + + pub fn deinit(self: LODRenderInterface) void { + self.deinit_fn(self.ptr); + } +}; diff --git a/src/world/meshing/ao_calculator.zig b/src/world/meshing/ao_calculator.zig new file mode 100644 index 00000000..61f64798 --- /dev/null +++ b/src/world/meshing/ao_calculator.zig @@ -0,0 +1,102 @@ +//! Ambient occlusion calculation for chunk meshing. +//! +//! Computes per-vertex AO values by sampling three neighbor blocks +//! (two orthogonal sides + diagonal corner) around each quad corner. + +const Chunk = @import("../chunk.zig").Chunk; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; +const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const BlockType = @import("../block.zig").BlockType; +const Face = @import("../block.zig").Face; +const block_registry = @import("../block_registry.zig"); +const boundary = @import("boundary.zig"); +const NeighborChunks = boundary.NeighborChunks; + +/// Get AO occlusion value at a block position (1.0 = occluding, 0.0 = open). +/// Uses cross-chunk neighbor lookup for positions near chunk edges. 
+pub inline fn getAOAt(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) f32 { + if (y < 0 or y >= CHUNK_SIZE_Y) return 0; + + const b: BlockType = blk: { + if (x < 0) { + if (z < 0 or z >= CHUNK_SIZE_Z) break :blk .air; // Lack of diagonal neighbors + break :blk if (neighbors.west) |w| w.getBlock(CHUNK_SIZE_X - 1, @intCast(y), @intCast(z)) else .air; + } else if (x >= CHUNK_SIZE_X) { + if (z < 0 or z >= CHUNK_SIZE_Z) break :blk .air; + break :blk if (neighbors.east) |e| e.getBlock(0, @intCast(y), @intCast(z)) else .air; + } else if (z < 0) { + // x is already checked to be [0, CHUNK_SIZE_X-1] + break :blk if (neighbors.north) |n| n.getBlock(@intCast(x), @intCast(y), CHUNK_SIZE_Z - 1) else .air; + } else if (z >= CHUNK_SIZE_Z) { + break :blk if (neighbors.south) |s| s.getBlock(@intCast(x), @intCast(y), 0) else .air; + } else { + break :blk chunk.getBlock(@intCast(x), @intCast(y), @intCast(z)); + } + }; + + const b_def = block_registry.getBlockDefinition(b); + return if (b_def.is_solid and !b_def.is_transparent) 1.0 else 0.0; +} + +/// Compute AO for a single vertex from three neighbor samples. +/// s1, s2: orthogonal side neighbors; c: diagonal corner neighbor. +/// Returns AO factor in range [0.4, 1.0] where 1.0 = no occlusion. +pub inline fn calculateVertexAO(s1: f32, s2: f32, c: f32) f32 { + if (s1 > 0.5 and s2 > 0.5) return 0.4; + return 1.0 - (s1 + s2 + c) * 0.2; +} + +/// Calculate AO values for all 4 corners of a greedy quad. +/// Returns an array of 4 AO factors ready for vertex emission. 
+pub inline fn calculateQuadAO( + chunk: *const Chunk, + neighbors: NeighborChunks, + axis: Face, + forward: bool, + p: [4][3]f32, +) [4]f32 { + var ao: [4]f32 = undefined; + for (0..4) |i| { + const vertex_pos = p[i]; + const center = [3]f32{ + (p[0][0] + p[2][0]) * 0.5, + (p[0][1] + p[2][1]) * 0.5, + (p[0][2] + p[2][2]) * 0.5, + }; + + const dir_x: i32 = if (vertex_pos[0] > center[0]) 0 else -1; + const dir_y: i32 = if (vertex_pos[1] > center[1]) 0 else -1; + const dir_z: i32 = if (vertex_pos[2] > center[2]) 0 else -1; + + const vx = @as(i32, @intFromFloat(@floor(vertex_pos[0]))); + const vy = @as(i32, @intFromFloat(@floor(vertex_pos[1]))); + const vz = @as(i32, @intFromFloat(@floor(vertex_pos[2]))); + + var s1: f32 = 0; + var s2: f32 = 0; + var c: f32 = 0; + + if (axis == .top) { + const y_off: i32 = if (forward) 0 else -1; + s1 = getAOAt(chunk, neighbors, vx + dir_x, vy + y_off, vz); + s2 = getAOAt(chunk, neighbors, vx, vy + y_off, vz + dir_z); + c = getAOAt(chunk, neighbors, vx + dir_x, vy + y_off, vz + dir_z); + } else if (axis == .east) { + const x_off: i32 = if (forward) 0 else -1; + s1 = getAOAt(chunk, neighbors, vx + x_off, vy + dir_y, vz); + s2 = getAOAt(chunk, neighbors, vx + x_off, vy, vz + dir_z); + c = getAOAt(chunk, neighbors, vx + x_off, vy + dir_y, vz + dir_z); + } else if (axis == .south) { + const z_off: i32 = if (forward) 0 else -1; + s1 = getAOAt(chunk, neighbors, vx + dir_x, vy, vz + z_off); + s2 = getAOAt(chunk, neighbors, vx, vy + dir_y, vz + z_off); + c = getAOAt(chunk, neighbors, vx + dir_x, vy + dir_y, vz + z_off); + } else { + unreachable; + } + + ao[i] = calculateVertexAO(s1, s2, c); + } + return ao; +} diff --git a/src/world/meshing/biome_color_sampler.zig b/src/world/meshing/biome_color_sampler.zig new file mode 100644 index 00000000..f6f0df72 --- /dev/null +++ b/src/world/meshing/biome_color_sampler.zig @@ -0,0 +1,74 @@ +//! Biome color blending for chunk meshing. +//! +//! 
Computes biome-tinted colors for blocks using 3x3 biome averaging. +//! Only grass (top face), leaves, and water receive biome tints. + +const std = @import("std"); +const Chunk = @import("../chunk.zig").Chunk; +const BlockType = @import("../block.zig").BlockType; +const Face = @import("../block.zig").Face; +const biome_mod = @import("../worldgen/biome.zig"); +const boundary = @import("boundary.zig"); +const NeighborChunks = boundary.NeighborChunks; + +/// Calculate the biome-tinted color for a block face. +/// Returns {1, 1, 1} (no tint) for blocks that don't receive biome coloring. +/// `s`, `u`, `v` are local coordinates on the slice plane (depending on `axis`). +pub inline fn getBlockColor(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, block: BlockType) [3]f32 { + // Only apply biome tint to top face of grass, and all faces of leaves/water + if (block == .grass) { + // Grass: only tint the top face, sides and bottom get no tint + if (axis != .top) return .{ 1.0, 1.0, 1.0 }; + } else if (block != .leaves and block != .water) { + return .{ 1.0, 1.0, 1.0 }; + } + + var x: i32 = undefined; + var z: i32 = undefined; + + switch (axis) { + .top => { + x = @intCast(u); + z = @intCast(v); + }, + .east => { + x = s; + z = @intCast(v); + }, + .south => { + x = @intCast(u); + z = s; + }, + else => { + x = @intCast(u); + z = @intCast(v); + }, + } + + var r: f32 = 0; + var g: f32 = 0; + var b: f32 = 0; + var count: f32 = 0; + + var ox: i32 = -1; + while (ox <= 1) : (ox += 1) { + var oz: i32 = -1; + while (oz <= 1) : (oz += 1) { + const biome_id = boundary.getBiomeAt(chunk, neighbors, x + ox, z + oz); + const def = biome_mod.getBiomeDefinition(biome_id); + const col = switch (block) { + .grass => def.colors.grass, + .leaves => def.colors.foliage, + .water => def.colors.water, + else => .{ 1.0, 1.0, 1.0 }, + }; + r += col[0]; + g += col[1]; + b += col[2]; + count += 1.0; + } + } + + std.debug.assert(count > 0); + return .{ r / count, g / 
count, b / count }; +} diff --git a/src/world/meshing/boundary.zig b/src/world/meshing/boundary.zig new file mode 100644 index 00000000..c09d073c --- /dev/null +++ b/src/world/meshing/boundary.zig @@ -0,0 +1,128 @@ +//! Cross-chunk boundary utilities for meshing. +//! +//! Provides safe block, light, and biome lookups that cross chunk boundaries +//! using the four horizontal neighbor chunks. Shared by AO, lighting, and +//! biome color sampling stages. + +const Chunk = @import("../chunk.zig").Chunk; +const PackedLight = @import("../chunk.zig").PackedLight; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; +const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const MAX_LIGHT = @import("../chunk.zig").MAX_LIGHT; +const BlockType = @import("../block.zig").BlockType; +const Face = @import("../block.zig").Face; +const biome_mod = @import("../worldgen/biome.zig"); +const std = @import("std"); + +pub const NeighborChunks = struct { + north: ?*const Chunk = null, + south: ?*const Chunk = null, + east: ?*const Chunk = null, + west: ?*const Chunk = null, + + pub const empty = NeighborChunks{ + .north = null, + .south = null, + .east = null, + .west = null, + }; +}; + +pub const SUBCHUNK_SIZE: u32 = 16; +pub const NUM_SUBCHUNKS: u32 = 16; + +/// Check if a face's emitting block falls within the current subchunk Y range. +pub inline fn isEmittingSubchunk(axis: Face, s: i32, u: u32, v: u32, y_min: i32, y_max: i32) bool { + const y: i32 = switch (axis) { + .top => s, + .east => @as(i32, @intCast(u)) + y_min, + .south => @as(i32, @intCast(v)) + y_min, + else => unreachable, + }; + return y >= y_min and y < y_max; +} + +/// Get the two blocks on either side of a face boundary. 
+pub inline fn getBlocksAtBoundary(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, si: u32) [2]BlockType { + const y_off: i32 = @intCast(si * SUBCHUNK_SIZE); + return switch (axis) { + .top => .{ chunk.getBlockSafe(@intCast(u), s - 1, @intCast(v)), chunk.getBlockSafe(@intCast(u), s, @intCast(v)) }, + .east => .{ + getBlockCross(chunk, neighbors, s - 1, y_off + @as(i32, @intCast(u)), @intCast(v)), + getBlockCross(chunk, neighbors, s, y_off + @as(i32, @intCast(u)), @intCast(v)), + }, + .south => .{ + getBlockCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s - 1), + getBlockCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s), + }, + else => unreachable, + }; +} + +/// Get block type with cross-chunk neighbor lookup. +pub inline fn getBlockCross(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) BlockType { + if (x < 0) return if (neighbors.west) |w| w.getBlockSafe(CHUNK_SIZE_X - 1, y, z) else .air; + if (x >= CHUNK_SIZE_X) return if (neighbors.east) |e| e.getBlockSafe(0, y, z) else .air; + if (z < 0) return if (neighbors.north) |n| n.getBlockSafe(x, y, CHUNK_SIZE_Z - 1) else .air; + if (z >= CHUNK_SIZE_Z) return if (neighbors.south) |s| s.getBlockSafe(x, y, 0) else .air; + return chunk.getBlockSafe(x, y, z); +} + +/// Get light with cross-chunk neighbor lookup. 
+pub inline fn getLightCross(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, y: i32, z: i32) PackedLight { + if (y >= CHUNK_SIZE_Y) return PackedLight.init(MAX_LIGHT, 0); + if (y < 0) return PackedLight.init(0, 0); + + if (x < 0) return if (neighbors.west) |w| w.getLightSafe(CHUNK_SIZE_X - 1, y, z) else PackedLight.init(MAX_LIGHT, 0); + if (x >= CHUNK_SIZE_X) return if (neighbors.east) |e| e.getLightSafe(0, y, z) else PackedLight.init(MAX_LIGHT, 0); + if (z < 0) return if (neighbors.north) |n| n.getLightSafe(x, y, CHUNK_SIZE_Z - 1) else PackedLight.init(MAX_LIGHT, 0); + if (z >= CHUNK_SIZE_Z) return if (neighbors.south) |s| s.getLightSafe(x, y, 0) else PackedLight.init(MAX_LIGHT, 0); + return chunk.getLightSafe(x, y, z); +} + +/// Get biome ID with cross-chunk neighbor lookup. +/// Handles diagonal corners (both X and Z out of bounds) by clamping to the +/// nearest in-bounds corner of the appropriate neighbor or current chunk. +pub inline fn getBiomeAt(chunk: *const Chunk, neighbors: NeighborChunks, x: i32, z: i32) biome_mod.BiomeId { + // Diagonal corners: both X and Z are out of bounds simultaneously. + // We don't have diagonal neighbor chunks, so fall back to the X-axis + // neighbor at its clamped Z edge, or the current chunk's nearest corner. 
+ if (x < 0 and z < 0) { + if (neighbors.west) |w| return w.getBiome(CHUNK_SIZE_X - 1, 0); + return chunk.getBiome(0, 0); + } + if (x < 0 and z >= CHUNK_SIZE_Z) { + if (neighbors.west) |w| return w.getBiome(CHUNK_SIZE_X - 1, CHUNK_SIZE_Z - 1); + return chunk.getBiome(0, CHUNK_SIZE_Z - 1); + } + if (x >= CHUNK_SIZE_X and z < 0) { + if (neighbors.east) |e| return e.getBiome(0, 0); + return chunk.getBiome(CHUNK_SIZE_X - 1, 0); + } + if (x >= CHUNK_SIZE_X and z >= CHUNK_SIZE_Z) { + if (neighbors.east) |e| return e.getBiome(0, CHUNK_SIZE_Z - 1); + return chunk.getBiome(CHUNK_SIZE_X - 1, CHUNK_SIZE_Z - 1); + } + + // Single-axis boundary cases (Z is guaranteed in-bounds here) + if (x < 0) { + if (neighbors.west) |w| return w.getBiome(CHUNK_SIZE_X - 1, @intCast(z)); + return chunk.getBiome(0, @intCast(z)); + } + if (x >= CHUNK_SIZE_X) { + if (neighbors.east) |e| return e.getBiome(0, @intCast(z)); + return chunk.getBiome(CHUNK_SIZE_X - 1, @intCast(z)); + } + + // X is in-bounds; only Z may be out of bounds + if (z < 0) { + if (neighbors.north) |n| return n.getBiome(@intCast(x), CHUNK_SIZE_Z - 1); + return chunk.getBiome(@intCast(x), 0); + } + if (z >= CHUNK_SIZE_Z) { + if (neighbors.south) |s| return s.getBiome(@intCast(x), 0); + return chunk.getBiome(@intCast(x), CHUNK_SIZE_Z - 1); + } + return chunk.getBiome(@intCast(x), @intCast(z)); +} diff --git a/src/world/meshing/greedy_mesher.zig b/src/world/meshing/greedy_mesher.zig new file mode 100644 index 00000000..3f047ba8 --- /dev/null +++ b/src/world/meshing/greedy_mesher.zig @@ -0,0 +1,282 @@ +//! Greedy meshing algorithm for chunk mesh generation. +//! +//! Builds 16x16 face masks for each slice along an axis, then greedily +//! merges adjacent faces with matching properties into larger quads. +//! Delegates AO, lighting, and biome color to their respective modules. 
+ +const std = @import("std"); + +const Chunk = @import("../chunk.zig").Chunk; +const PackedLight = @import("../chunk.zig").PackedLight; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const BlockType = @import("../block.zig").BlockType; +const Face = @import("../block.zig").Face; +const block_registry = @import("../block_registry.zig"); +const TextureAtlas = @import("../../engine/graphics/texture_atlas.zig").TextureAtlas; +const rhi_mod = @import("../../engine/graphics/rhi.zig"); +const Vertex = rhi_mod.Vertex; + +const boundary = @import("boundary.zig"); +const NeighborChunks = boundary.NeighborChunks; +const SUBCHUNK_SIZE = boundary.SUBCHUNK_SIZE; + +const ao_calculator = @import("ao_calculator.zig"); +const lighting_sampler = @import("lighting_sampler.zig"); +const biome_color_sampler = @import("biome_color_sampler.zig"); + +/// Maximum light level difference (per channel) allowed when merging adjacent +/// faces into a single greedy quad. A tolerance of 1 produces imperceptible +/// banding while significantly reducing vertex count. +const MAX_LIGHT_DIFF_FOR_MERGE: u8 = 1; + +/// Maximum per-channel color difference allowed when merging adjacent faces. +/// 0.02 is roughly 5/256 — below the perceptible threshold for biome tint +/// gradients, keeping quads large without visible color steps. +const MAX_COLOR_DIFF_FOR_MERGE: f32 = 0.02; + +const FaceKey = struct { + block: BlockType, + side: bool, + light: PackedLight, + color: [3]f32, +}; + +/// Process a single 16x16 slice along the given axis, producing greedy-merged quads. +/// Populates solid_list and fluid_list with generated vertices. 
+pub fn meshSlice( + allocator: std.mem.Allocator, + chunk: *const Chunk, + neighbors: NeighborChunks, + axis: Face, + s: i32, + si: u32, + solid_list: *std.ArrayListUnmanaged(Vertex), + fluid_list: *std.ArrayListUnmanaged(Vertex), + atlas: *const TextureAtlas, +) !void { + if (axis != .top and axis != .east and axis != .south) return error.UnsupportedFace; + + const du: u32 = 16; + const dv: u32 = 16; + var mask = try allocator.alloc(?FaceKey, du * dv); + defer allocator.free(mask); + @memset(mask, null); + + // Phase 1: Build the face mask + var v: u32 = 0; + while (v < dv) : (v += 1) { + var u: u32 = 0; + while (u < du) : (u += 1) { + const res = boundary.getBlocksAtBoundary(chunk, neighbors, axis, s, u, v, si); + const b1 = res[0]; + const b2 = res[1]; + + const y_min: i32 = @intCast(si * SUBCHUNK_SIZE); + const y_max: i32 = y_min + SUBCHUNK_SIZE; + + const b1_def = block_registry.getBlockDefinition(b1); + const b2_def = block_registry.getBlockDefinition(b2); + + const b1_emits = b1_def.is_solid or (b1_def.is_fluid and !b2_def.is_fluid); + const b2_emits = b2_def.is_solid or (b2_def.is_fluid and !b1_def.is_fluid); + + if (boundary.isEmittingSubchunk(axis, s - 1, u, v, y_min, y_max) and b1_emits and !b2_def.occludes(b1_def, axis)) { + const light = lighting_sampler.sampleLightAtBoundary(chunk, neighbors, axis, s, u, v, si); + const color = biome_color_sampler.getBlockColor(chunk, neighbors, axis, s - 1, u, v, b1); + mask[u + v * du] = .{ .block = b1, .side = true, .light = light, .color = color }; + } else if (boundary.isEmittingSubchunk(axis, s, u, v, y_min, y_max) and b2_emits and !b1_def.occludes(b2_def, axis)) { + const light = lighting_sampler.sampleLightAtBoundary(chunk, neighbors, axis, s, u, v, si); + const color = biome_color_sampler.getBlockColor(chunk, neighbors, axis, s, u, v, b2); + mask[u + v * du] = .{ .block = b2, .side = false, .light = light, .color = color }; + } + } + } + + // Phase 2: Greedy rectangle expansion + var sv: u32 = 0; + while (sv 
< dv) : (sv += 1) { + var su: u32 = 0; + while (su < du) : (su += 1) { + const k_opt = mask[su + sv * du]; + if (k_opt == null) continue; + const k = k_opt.?; + + var width: u32 = 1; + while (su + width < du) : (width += 1) { + const nxt_opt = mask[su + width + sv * du]; + if (nxt_opt == null) break; + const nxt = nxt_opt.?; + if (nxt.block != k.block or nxt.side != k.side) break; + const sky_diff = @as(i8, @intCast(nxt.light.getSkyLight())) - @as(i8, @intCast(k.light.getSkyLight())); + const r_diff = @as(i8, @intCast(nxt.light.getBlockLightR())) - @as(i8, @intCast(k.light.getBlockLightR())); + const g_diff = @as(i8, @intCast(nxt.light.getBlockLightG())) - @as(i8, @intCast(k.light.getBlockLightG())); + const b_diff = @as(i8, @intCast(nxt.light.getBlockLightB())) - @as(i8, @intCast(k.light.getBlockLightB())); + if (@abs(sky_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(r_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(g_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(b_diff) > MAX_LIGHT_DIFF_FOR_MERGE) break; + + const diff_r = @abs(nxt.color[0] - k.color[0]); + const diff_g = @abs(nxt.color[1] - k.color[1]); + const diff_b = @abs(nxt.color[2] - k.color[2]); + if (diff_r > MAX_COLOR_DIFF_FOR_MERGE or diff_g > MAX_COLOR_DIFF_FOR_MERGE or diff_b > MAX_COLOR_DIFF_FOR_MERGE) break; + } + var height: u32 = 1; + var dvh: u32 = 1; + outer: while (sv + dvh < dv) : (dvh += 1) { + var duw: u32 = 0; + while (duw < width) : (duw += 1) { + const nxt_opt = mask[su + duw + (sv + dvh) * du]; + if (nxt_opt == null) break :outer; + const nxt = nxt_opt.?; + if (nxt.block != k.block or nxt.side != k.side) break :outer; + const sky_diff = @as(i8, @intCast(nxt.light.getSkyLight())) - @as(i8, @intCast(k.light.getSkyLight())); + const r_diff = @as(i8, @intCast(nxt.light.getBlockLightR())) - @as(i8, @intCast(k.light.getBlockLightR())); + const g_diff = @as(i8, @intCast(nxt.light.getBlockLightG())) - @as(i8, @intCast(k.light.getBlockLightG())); + const b_diff = @as(i8, @intCast(nxt.light.getBlockLightB())) - 
@as(i8, @intCast(k.light.getBlockLightB())); + if (@abs(sky_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(r_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(g_diff) > MAX_LIGHT_DIFF_FOR_MERGE or @abs(b_diff) > MAX_LIGHT_DIFF_FOR_MERGE) break :outer; + + const diff_r = @abs(nxt.color[0] - k.color[0]); + const diff_g = @abs(nxt.color[1] - k.color[1]); + const diff_b = @abs(nxt.color[2] - k.color[2]); + if (diff_r > MAX_COLOR_DIFF_FOR_MERGE or diff_g > MAX_COLOR_DIFF_FOR_MERGE or diff_b > MAX_COLOR_DIFF_FOR_MERGE) break :outer; + } + height += 1; + } + + const k_def = block_registry.getBlockDefinition(k.block); + const target = if (k_def.render_pass == .fluid) fluid_list else solid_list; + try addGreedyFace(allocator, target, axis, s, su, sv, width, height, k_def, k.side, si, k.light, k.color, chunk, neighbors, atlas); + + var dy: u32 = 0; + while (dy < height) : (dy += 1) { + var dx: u32 = 0; + while (dx < width) : (dx += 1) { + mask[su + dx + (sv + dy) * du] = null; + } + } + su += width - 1; + } + } +} + +/// Generate 6 vertices (2 triangles) for a greedy-merged quad. +/// Computes positions, UVs, normals, AO, lighting, and biome-tinted colors. 
+fn addGreedyFace( + allocator: std.mem.Allocator, + verts: *std.ArrayListUnmanaged(Vertex), + axis: Face, + s: i32, + u: u32, + v: u32, + w: u32, + h: u32, + block_def: *const block_registry.BlockDefinition, + forward: bool, + si: u32, + light: PackedLight, + tint: [3]f32, + chunk: *const Chunk, + neighbors: NeighborChunks, + atlas: *const TextureAtlas, +) !void { + const face = if (forward) axis else switch (axis) { + .top => Face.bottom, + .east => Face.west, + .south => Face.north, + else => return error.UnsupportedFace, + }; + const base_col = block_def.getFaceColor(face); + const col = [3]f32{ base_col[0] * tint[0], base_col[1] * tint[1], base_col[2] * tint[2] }; + const norm = face.getNormal(); + const nf = [3]f32{ @floatFromInt(norm[0]), @floatFromInt(norm[1]), @floatFromInt(norm[2]) }; + const tiles = atlas.getTilesForBlock(@intFromEnum(block_def.id)); + const tid: f32 = @floatFromInt(switch (face) { + .top => tiles.top, + .bottom => tiles.bottom, + else => tiles.side, + }); + const wf: f32 = @floatFromInt(w); + const hf: f32 = @floatFromInt(h); + const sf: f32 = @floatFromInt(s); + const uf: f32 = @floatFromInt(u); + const vf: f32 = @floatFromInt(v); + + var p: [4][3]f32 = undefined; + var uv: [4][2]f32 = undefined; + switch (axis) { + .top => { + const y = sf; + if (forward) { + p[0] = .{ uf, y, vf + hf }; + p[1] = .{ uf + wf, y, vf + hf }; + p[2] = .{ uf + wf, y, vf }; + p[3] = .{ uf, y, vf }; + } else { + p[0] = .{ uf, y, vf }; + p[1] = .{ uf + wf, y, vf }; + p[2] = .{ uf + wf, y, vf + hf }; + p[3] = .{ uf, y, vf + hf }; + } + uv = [4][2]f32{ .{ 0, 0 }, .{ wf, 0 }, .{ wf, hf }, .{ 0, hf } }; + }, + .east => { + const x = sf; + const y0: f32 = @floatFromInt(si * SUBCHUNK_SIZE); + if (forward) { + p[0] = .{ x, y0 + uf, vf + hf }; + p[1] = .{ x, y0 + uf, vf }; + p[2] = .{ x, y0 + uf + wf, vf }; + p[3] = .{ x, y0 + uf + wf, vf + hf }; + } else { + p[0] = .{ x, y0 + uf, vf }; + p[1] = .{ x, y0 + uf, vf + hf }; + p[2] = .{ x, y0 + uf + wf, vf + hf }; + p[3] 
= .{ x, y0 + uf + wf, vf }; + } + uv = [4][2]f32{ .{ 0, wf }, .{ hf, wf }, .{ hf, 0 }, .{ 0, 0 } }; + }, + .south => { + const z = sf; + const y0: f32 = @floatFromInt(si * SUBCHUNK_SIZE); + if (forward) { + p[0] = .{ uf, y0 + vf, z }; + p[1] = .{ uf + wf, y0 + vf, z }; + p[2] = .{ uf + wf, y0 + vf + hf, z }; + p[3] = .{ uf, y0 + vf + hf, z }; + } else { + p[0] = .{ uf + wf, y0 + vf, z }; + p[1] = .{ uf, y0 + vf, z }; + p[2] = .{ uf, y0 + vf + hf, z }; + p[3] = .{ uf + wf, y0 + vf + hf, z }; + } + uv = [4][2]f32{ .{ 0, hf }, .{ wf, hf }, .{ wf, 0 }, .{ 0, 0 } }; + }, + else => return error.UnsupportedFace, + } + + // Calculate AO for all 4 corners + const ao = ao_calculator.calculateQuadAO(chunk, neighbors, axis, forward, p); + + // Choose triangle orientation to minimize AO artifacts (flipping the diagonal) + var idxs: [6]usize = undefined; + if (ao[0] + ao[2] < ao[1] + ao[3]) { + idxs = .{ 1, 2, 3, 1, 3, 0 }; + } else { + idxs = .{ 0, 1, 2, 0, 2, 3 }; + } + + // Normalize light values + const norm_light = lighting_sampler.normalizeLightValues(light); + + for (idxs) |i| { + try verts.append(allocator, Vertex{ + .pos = p[i], + .color = col, + .normal = nf, + .uv = uv[i], + .tile_id = tid, + .skylight = norm_light.skylight, + .blocklight = norm_light.blocklight, + .ao = ao[i], + }); + } +} diff --git a/src/world/meshing/lighting_sampler.zig b/src/world/meshing/lighting_sampler.zig new file mode 100644 index 00000000..0af2a070 --- /dev/null +++ b/src/world/meshing/lighting_sampler.zig @@ -0,0 +1,40 @@ +//! Light sampling for chunk meshing. +//! +//! Extracts sky and block light values at face boundaries, +//! with cross-chunk neighbor fallback for edges. 
+ +const Chunk = @import("../chunk.zig").Chunk; +const PackedLight = @import("../chunk.zig").PackedLight; +const Face = @import("../block.zig").Face; +const boundary = @import("boundary.zig"); +const NeighborChunks = boundary.NeighborChunks; +const SUBCHUNK_SIZE = boundary.SUBCHUNK_SIZE; + +/// Normalized light values ready for vertex emission. +pub const NormalizedLight = struct { + skylight: f32, + blocklight: [3]f32, +}; + +/// Sample light at a face boundary, using cross-chunk neighbor lookup for X/Z axes. +pub inline fn sampleLightAtBoundary(chunk: *const Chunk, neighbors: NeighborChunks, axis: Face, s: i32, u: u32, v: u32, si: u32) PackedLight { + const y_off: i32 = @intCast(si * SUBCHUNK_SIZE); + return switch (axis) { + .top => chunk.getLightSafe(@intCast(u), s, @intCast(v)), + .east => boundary.getLightCross(chunk, neighbors, s, y_off + @as(i32, @intCast(u)), @intCast(v)), + .south => boundary.getLightCross(chunk, neighbors, @intCast(u), y_off + @as(i32, @intCast(v)), s), + else => unreachable, + }; +} + +/// Convert a PackedLight into normalized [0.0, 1.0] values for vertex attributes. 
+pub inline fn normalizeLightValues(light: PackedLight) NormalizedLight { + return .{ + .skylight = @as(f32, @floatFromInt(light.getSkyLight())) / 15.0, + .blocklight = .{ + @as(f32, @floatFromInt(light.getBlockLightR())) / 15.0, + @as(f32, @floatFromInt(light.getBlockLightG())) / 15.0, + @as(f32, @floatFromInt(light.getBlockLightB())) / 15.0, + }, + }; +} diff --git a/src/world/world.zig b/src/world/world.zig index 54bc97eb..d9051f56 100644 --- a/src/world/world.zig +++ b/src/world/world.zig @@ -18,7 +18,8 @@ const registry = @import("worldgen/registry.zig"); const GlobalVertexAllocator = @import("chunk_allocator.zig").GlobalVertexAllocator; const rhi_mod = @import("../engine/graphics/rhi.zig"); const RHI = rhi_mod.RHI; -const LODManager = @import("lod_manager.zig").LODManager(RHI); +const LODManager = @import("lod_manager.zig").LODManager; +const LODRenderer = @import("lod_renderer.zig").LODRenderer(RHI); const Vec3 = @import("../engine/math/vec3.zig").Vec3; const Mat4 = @import("../engine/math/mat4.zig").Mat4; const Frustum = @import("../engine/math/frustum.zig").Frustum; @@ -58,6 +59,7 @@ pub const World = struct { // LOD System (Issue #114) lod_manager: ?*LODManager, + lod_renderer: ?*LODRenderer, // Owned separately; LODManager holds a type-erased interface lod_enabled: bool, pub fn init(allocator: std.mem.Allocator, render_distance: i32, seed: u64, rhi: RHI, atlas: *const TextureAtlas) !*World { @@ -95,6 +97,7 @@ pub const World = struct { .safe_mode = safe_mode, .safe_render_distance = safe_render_distance, .lod_manager = null, + .lod_renderer = null, .lod_enabled = false, }; @@ -112,8 +115,16 @@ pub const World = struct { pub fn initGenWithLOD(generator_index: usize, allocator: std.mem.Allocator, render_distance: i32, seed: u64, rhi: RHI, lod_config: ILODConfig, atlas: *const TextureAtlas) !*World { const world = try initGen(generator_index, allocator, render_distance, seed, rhi, atlas); - // Initialize LOD manager with generator reference - 
world.lod_manager = try LODManager.init(allocator, lod_config, rhi, world.generator); + // Create LODRenderer (owns GPU draw resources, stays generic over RHI) + const lod_renderer = try LODRenderer.init(allocator, rhi); + + // Create GPU bridge + render interface from the concrete renderer + const gpu_bridge = lod_renderer.createGPUBridge(); + const render_iface = lod_renderer.toInterface(); + + // Initialize LOD manager with callback interfaces (no direct RHI dependency) + world.lod_manager = try LODManager.init(allocator, lod_config, gpu_bridge, render_iface, world.generator); + world.lod_renderer = lod_renderer; world.lod_enabled = true; const radii = lod_config.getRadii(); @@ -132,10 +143,15 @@ pub const World = struct { self.storage.deinitWithoutRHI(); self.renderer.deinit(); - // Cleanup LOD manager if enabled + // Cleanup LOD system if enabled. + // LODManager must be deinit'd first (it uses gpu_bridge callbacks that reference the renderer's RHI). + // LODRenderer is deinit'd second (it owns GPU draw buffers). 
if (self.lod_manager) |lod_mgr| { lod_mgr.deinit(); } + if (self.lod_renderer) |lod_rend| { + lod_rend.deinit(); + } self.generator.deinit(self.allocator); diff --git a/src/world/world_renderer.zig b/src/world/world_renderer.zig index 18b362e5..7ed57760 100644 --- a/src/world/world_renderer.zig +++ b/src/world/world_renderer.zig @@ -9,7 +9,7 @@ const CHUNK_SIZE_Z = @import("chunk.zig").CHUNK_SIZE_Z; const GlobalVertexAllocator = @import("chunk_allocator.zig").GlobalVertexAllocator; const rhi_mod = @import("../engine/graphics/rhi.zig"); const RHI = rhi_mod.RHI; -const LODManager = @import("lod_manager.zig").LODManager(RHI); +const LODManager = @import("lod_manager.zig").LODManager; const Vec3 = @import("../engine/math/vec3.zig").Vec3; const Mat4 = @import("../engine/math/mat4.zig").Mat4; const Frustum = @import("../engine/math/frustum.zig").Frustum; @@ -180,8 +180,10 @@ pub const WorldRenderer = struct { self.storage.chunks_mutex.lockShared(); defer self.storage.chunks_mutex.unlockShared(); + // FIX: Enable frustum culling for LOD chunks in shadow pass + // This ensures LOD chunks are properly culled using the light-space frustum if (lod_manager) |lod_mgr| { - lod_mgr.render(light_space_matrix, camera_pos, ChunkStorage.isChunkRenderable, @ptrCast(self.storage), false); + lod_mgr.render(light_space_matrix, camera_pos, ChunkStorage.isChunkRenderable, @ptrCast(self.storage), true); } const frustum = shadow_frustum; diff --git a/src/world/worldgen/biome.zig b/src/world/worldgen/biome.zig index 57758149..414cf208 100644 --- a/src/world/worldgen/biome.zig +++ b/src/world/worldgen/biome.zig @@ -1,1053 +1,104 @@ -//! Data-driven biome system per biomes.md spec -//! 
Each biome is defined by parameter ranges and evaluated by scoring algorithm - -const std = @import("std"); -const BlockType = @import("../block.zig").BlockType; -const tree_registry = @import("tree_registry.zig"); -pub const TreeType = tree_registry.TreeType; - -/// Minimum sum threshold for biome blend calculation to avoid division by near-zero values -const BLEND_EPSILON: f32 = 0.0001; - -/// Represents a range of values for biome parameter matching -pub const Range = struct { - min: f32, - max: f32, - - /// Check if a value falls within this range - pub fn contains(self: Range, value: f32) bool { - return value >= self.min and value <= self.max; - } - - /// Get normalized distance from center (0 = at center, 1 = at edge) - pub fn distanceFromCenter(self: Range, value: f32) f32 { - const center = (self.min + self.max) * 0.5; - const half_width = (self.max - self.min) * 0.5; - if (half_width <= 0) return if (value == center) 0 else 1; - return @min(1.0, @abs(value - center) / half_width); - } - - /// Convenience for "any value" - pub fn any() Range { - return .{ .min = 0.0, .max = 1.0 }; - } -}; - -/// Color tints for visual biome identity (RGB 0-1) -pub const ColorTints = struct { - grass: [3]f32 = .{ 0.3, 0.65, 0.2 }, // Default green - foliage: [3]f32 = .{ 0.2, 0.5, 0.15 }, - water: [3]f32 = .{ 0.2, 0.4, 0.8 }, -}; - -/// Vegetation profile for biome-driven placement -pub const VegetationProfile = struct { - tree_types: []const TreeType = &.{.oak}, - tree_density: f32 = 0.05, // Probability per attempt - bush_density: f32 = 0.0, - grass_density: f32 = 0.0, - cactus_density: f32 = 0.0, - dead_bush_density: f32 = 0.0, - bamboo_density: f32 = 0.0, - melon_density: f32 = 0.0, - red_mushroom_density: f32 = 0.0, - brown_mushroom_density: f32 = 0.0, -}; - -/// Terrain modifiers applied during height computation -pub const TerrainModifier = struct { - /// Multiplier for hill/mountain amplitude (1.0 = normal) - height_amplitude: f32 = 1.0, - /// How much to 
smooth/flatten terrain (0 = no change, 1 = fully flat) - smoothing: f32 = 0.0, - /// Clamp height near sea level (for swamps) - clamp_to_sea_level: bool = false, - /// Additional height offset - height_offset: f32 = 0.0, -}; - -/// Surface block configuration -pub const SurfaceBlocks = struct { - top: BlockType = .grass, - filler: BlockType = .dirt, - depth_range: i32 = 3, -}; - -/// Complete biome definition - data-driven and extensible -pub const BiomeDefinition = struct { - id: BiomeId, - name: []const u8, - - // Parameter ranges for selection - temperature: Range, - humidity: Range, - elevation: Range = Range.any(), - continentalness: Range = Range.any(), - ruggedness: Range = Range.any(), - - // Structural constraints - terrain structure determines biome eligibility - min_height: i32 = 0, // Minimum absolute height (blocks from y=0) - max_height: i32 = 256, // Maximum absolute height - max_slope: i32 = 255, // Maximum allowed slope in blocks (0 = flat) - min_ridge_mask: f32 = 0.0, // Minimum ridge mask value - max_ridge_mask: f32 = 1.0, // Maximum ridge mask value - - // Selection tuning - priority: i32 = 0, // Higher priority wins ties - blend_weight: f32 = 1.0, // For future blending - - // Biome properties - surface: SurfaceBlocks = .{}, - vegetation: VegetationProfile = .{}, - terrain: TerrainModifier = .{}, - colors: ColorTints = .{}, - - /// Check if biome meets structural constraints (height, slope, continentalness, ridge) - pub fn meetsStructuralConstraints(self: BiomeDefinition, height: i32, slope: i32, continentalness: f32, ridge_mask: f32) bool { - if (height < self.min_height) return false; - if (height > self.max_height) return false; - if (slope > self.max_slope) return false; - if (!self.continentalness.contains(continentalness)) return false; - if (ridge_mask < self.min_ridge_mask or ridge_mask > self.max_ridge_mask) return false; - return true; - } - - /// Score how well this biome matches the given climate parameters - /// Only temperature, 
humidity, and elevation affect the score (structural already filtered) - pub fn scoreClimate(self: BiomeDefinition, params: ClimateParams) f32 { - // Check if within climate ranges - if (!self.temperature.contains(params.temperature)) return 0; - if (!self.humidity.contains(params.humidity)) return 0; - if (!self.elevation.contains(params.elevation)) return 0; - - // Compute weighted distance from ideal center - const t_dist = self.temperature.distanceFromCenter(params.temperature); - const h_dist = self.humidity.distanceFromCenter(params.humidity); - const e_dist = self.elevation.distanceFromCenter(params.elevation); - - // Average distance (lower is better) - const avg_dist = (t_dist + h_dist + e_dist) / 3.0; - - // Convert to score (higher is better), add priority bonus - return (1.0 - avg_dist) + @as(f32, @floatFromInt(self.priority)) * 0.01; - } -}; - -/// Climate parameters computed per (x,z) column -pub const ClimateParams = struct { - temperature: f32, // 0=cold, 1=hot (altitude-adjusted) - humidity: f32, // 0=dry, 1=wet - elevation: f32, // Normalized: 0=sea level, 1=max height - continentalness: f32, // 0=deep ocean, 1=deep inland - ruggedness: f32, // 0=smooth, 1=mountainous (erosion inverted) -}; - -/// Biome identifiers - matches existing enum in block.zig -/// Per worldgen-revamp.md Section 4.3: Add transition micro-biomes -pub const BiomeId = enum(u8) { - deep_ocean = 0, - ocean = 1, - beach = 2, - plains = 3, - forest = 4, - taiga = 5, - desert = 6, - snow_tundra = 7, - mountains = 8, - snowy_mountains = 9, - river = 10, - swamp = 11, // New biome from spec - mangrove_swamp = 12, - jungle = 13, - savanna = 14, - badlands = 15, - mushroom_fields = 16, - // Per worldgen-revamp.md Section 4.3: Transition micro-biomes - foothills = 17, // Plains <-> Mountains transition - marsh = 18, // Forest <-> Swamp transition - dry_plains = 19, // Desert <-> Forest/Plains transition - coastal_plains = 20, // Coastal no-tree zone -}; +//! 
Biome system facade — re-exports from specialized sub-modules. +//! +//! This file exists solely to preserve the existing import path +//! `@import("biome.zig")` used by 17+ files across the codebase. +//! All logic lives in the sub-modules: +//! - biome_registry.zig — Data definitions, types, BIOME_REGISTRY +//! - biome_selector.zig — Selection algorithms (Voronoi, score-based, blended) +//! - biome_edge_detector.zig — Edge detection, transition rules +//! - biome_color_provider.zig — Color lookup for LOD/minimap +//! - biome_source.zig — BiomeSource unified interface // ============================================================================ -// Edge Detection Types and Constants (Issue #102) +// Sub-module imports // ============================================================================ -/// Sampling step for edge detection (every N blocks) -pub const EDGE_STEP: u32 = 4; - -/// Radii to check for neighboring biomes (in world blocks) -pub const EDGE_CHECK_RADII = [_]u32{ 4, 8, 12 }; - -/// Target width of transition bands (blocks) -pub const EDGE_WIDTH: u32 = 8; - -/// Represents proximity to a biome boundary -pub const EdgeBand = enum(u2) { - none = 0, // No edge detected - outer = 1, // 8-12 blocks from boundary - middle = 2, // 4-8 blocks from boundary - inner = 3, // 0-4 blocks from boundary -}; - -/// Information about biome edge detection result -pub const BiomeEdgeInfo = struct { - base_biome: BiomeId, - neighbor_biome: ?BiomeId, // Different biome if edge detected - edge_band: EdgeBand, -}; - -/// Rule defining which biome pairs need a transition zone -pub const TransitionRule = struct { - biome_a: BiomeId, - biome_b: BiomeId, - transition: BiomeId, -}; - -/// Biome adjacency rules - pairs that need buffer biomes between them -pub const TRANSITION_RULES = [_]TransitionRule{ - // Hot/dry <-> Temperate - .{ .biome_a = .desert, .biome_b = .forest, .transition = .dry_plains }, - .{ .biome_a = .desert, .biome_b = .plains, .transition = .dry_plains }, 
- .{ .biome_a = .desert, .biome_b = .taiga, .transition = .dry_plains }, - .{ .biome_a = .desert, .biome_b = .jungle, .transition = .savanna }, - - // Cold <-> Temperate - .{ .biome_a = .snow_tundra, .biome_b = .plains, .transition = .taiga }, - .{ .biome_a = .snow_tundra, .biome_b = .forest, .transition = .taiga }, - - // Wetland <-> Forest - .{ .biome_a = .swamp, .biome_b = .forest, .transition = .marsh }, - .{ .biome_a = .swamp, .biome_b = .plains, .transition = .marsh }, - - // Mountain <-> Lowland - .{ .biome_a = .mountains, .biome_b = .plains, .transition = .foothills }, - .{ .biome_a = .mountains, .biome_b = .forest, .transition = .foothills }, - .{ .biome_a = .snowy_mountains, .biome_b = .taiga, .transition = .foothills }, - .{ .biome_a = .snowy_mountains, .biome_b = .snow_tundra, .transition = .foothills }, -}; - -/// Check if two biomes need a transition zone between them -pub fn needsTransition(a: BiomeId, b: BiomeId) bool { - for (TRANSITION_RULES) |rule| { - if ((rule.biome_a == a and rule.biome_b == b) or - (rule.biome_a == b and rule.biome_b == a)) - { - return true; - } - } - return false; -} - -/// Get the transition biome for a pair of biomes, if one is defined -pub fn getTransitionBiome(a: BiomeId, b: BiomeId) ?BiomeId { - for (TRANSITION_RULES) |rule| { - if ((rule.biome_a == a and rule.biome_b == b) or - (rule.biome_a == b and rule.biome_b == a)) - { - return rule.transition; - } - } - return null; -} +const biome_registry = @import("biome_registry.zig"); +const biome_selector = @import("biome_selector.zig"); +const biome_edge_detector = @import("biome_edge_detector.zig"); +const biome_color_provider = @import("biome_color_provider.zig"); +const biome_source_mod = @import("biome_source.zig"); // ============================================================================ -// Voronoi Biome Selection System (Issue #106) -// Selects biomes using Voronoi diagram in heat/humidity space +// Types from biome_registry.zig // 
============================================================================ -/// Voronoi point defining a biome's position in climate space -/// Biomes are selected by finding the closest point to the sampled heat/humidity -pub const BiomePoint = struct { - id: BiomeId, - heat: f32, // 0-100 scale (cold to hot) - humidity: f32, // 0-100 scale (dry to wet) - weight: f32 = 1.0, // Cell size multiplier (larger = bigger biome regions) - y_min: i32 = 0, // Minimum Y level - y_max: i32 = 256, // Maximum Y level - /// Maximum allowed slope in blocks (0 = flat, 255 = vertical cliff) - max_slope: i32 = 255, - /// Minimum continentalness (0-1). Set > 0.35 for land-only biomes - min_continental: f32 = 0.0, - /// Maximum continentalness. Set < 0.35 for ocean-only biomes - max_continental: f32 = 1.0, -}; - -/// Voronoi biome points - defines where each biome sits in heat/humidity space -/// Heat: 0=frozen, 50=temperate, 100=scorching -/// Humidity: 0=arid, 50=normal, 100=saturated -pub const BIOME_POINTS = [_]BiomePoint{ - // === Ocean Biomes (continental < 0.35) === - .{ .id = .deep_ocean, .heat = 50, .humidity = 50, .weight = 1.5, .max_continental = 0.20 }, - .{ .id = .ocean, .heat = 50, .humidity = 50, .weight = 1.5, .min_continental = 0.20, .max_continental = 0.35 }, - - // === Coastal Biomes === - .{ .id = .beach, .heat = 60, .humidity = 50, .weight = 0.6, .max_slope = 2, .min_continental = 0.35, .max_continental = 0.42, .y_max = 70 }, - - // === Cold Biomes === - .{ .id = .snow_tundra, .heat = 5, .humidity = 30, .weight = 1.0, .min_continental = 0.42 }, - .{ .id = .taiga, .heat = 20, .humidity = 60, .weight = 1.0, .min_continental = 0.42 }, - .{ .id = .snowy_mountains, .heat = 10, .humidity = 40, .weight = 0.8, .min_continental = 0.60, .y_min = 100 }, - - // === Temperate Biomes === - .{ .id = .plains, .heat = 50, .humidity = 45, .weight = 1.5, .min_continental = 0.42 }, // Large weight = common - .{ .id = .forest, .heat = 45, .humidity = 65, .weight = 1.2, 
.min_continental = 0.42 }, - .{ .id = .mountains, .heat = 40, .humidity = 50, .weight = 0.8, .min_continental = 0.60, .y_min = 90 }, - - // === Warm/Wet Biomes === - .{ .id = .swamp, .heat = 65, .humidity = 85, .weight = 0.8, .max_slope = 3, .min_continental = 0.42, .y_max = 72 }, - .{ .id = .mangrove_swamp, .heat = 75, .humidity = 90, .weight = 0.6, .max_slope = 3, .min_continental = 0.35, .max_continental = 0.50, .y_max = 68 }, - .{ .id = .jungle, .heat = 85, .humidity = 85, .weight = 0.9, .min_continental = 0.50 }, - - // === Hot/Dry Biomes === - .{ .id = .desert, .heat = 90, .humidity = 10, .weight = 1.2, .min_continental = 0.42, .y_max = 90 }, - .{ .id = .savanna, .heat = 80, .humidity = 30, .weight = 1.0, .min_continental = 0.42 }, - .{ .id = .badlands, .heat = 85, .humidity = 15, .weight = 0.7, .min_continental = 0.55 }, - - // === Special Biomes === - .{ .id = .mushroom_fields, .heat = 50, .humidity = 80, .weight = 0.3, .min_continental = 0.35, .max_continental = 0.45 }, - .{ .id = .river, .heat = 50, .humidity = 70, .weight = 0.4, .min_continental = 0.42 }, // Selected by river mask, not Voronoi - - // === Transition Biomes (created by edge detection, but need Voronoi fallback) === - // These have extreme positions so they're rarely selected directly - .{ .id = .foothills, .heat = 45, .humidity = 45, .weight = 0.5, .min_continental = 0.55, .y_min = 75, .y_max = 100 }, - .{ .id = .marsh, .heat = 55, .humidity = 78, .weight = 0.5, .min_continental = 0.42, .y_max = 68 }, - .{ .id = .dry_plains, .heat = 70, .humidity = 25, .weight = 0.6, .min_continental = 0.42 }, - .{ .id = .coastal_plains, .heat = 55, .humidity = 50, .weight = 0.5, .min_continental = 0.35, .max_continental = 0.48 }, -}; - -/// Select biome using Voronoi diagram in heat/humidity space -/// Returns the biome whose point is closest to the given heat/humidity values -pub fn selectBiomeVoronoi(heat: f32, humidity: f32, height: i32, continentalness: f32, slope: i32) BiomeId { - var min_dist: f32 = 
std.math.inf(f32); - var closest: BiomeId = .plains; - - for (BIOME_POINTS) |point| { - // Check height constraint - if (height < point.y_min or height > point.y_max) continue; - - // Check slope constraint - if (slope > point.max_slope) continue; - - // Check continentalness constraint - if (continentalness < point.min_continental or continentalness > point.max_continental) continue; - - // Calculate weighted Euclidean distance in heat/humidity space - const d_heat = heat - point.heat; - const d_humidity = humidity - point.humidity; - var dist = @sqrt(d_heat * d_heat + d_humidity * d_humidity); - - // Weight adjusts effective cell size (larger weight = closer distance = more likely) - dist /= point.weight; - - if (dist < min_dist) { - min_dist = dist; - closest = point.id; - } - } - - return closest; -} - -/// Select biome using Voronoi with river override -pub fn selectBiomeVoronoiWithRiver( - heat: f32, - humidity: f32, - height: i32, - continentalness: f32, - slope: i32, - river_mask: f32, -) BiomeId { - // River biome takes priority when river mask is active - // Issue #110: Allow rivers at higher elevations (canyons) - if (river_mask > 0.5 and height < 120) { - return .river; - } - return selectBiomeVoronoi(heat, humidity, height, continentalness, slope); -} +pub const Range = biome_registry.Range; +pub const ColorTints = biome_registry.ColorTints; +pub const VegetationProfile = biome_registry.VegetationProfile; +pub const TerrainModifier = biome_registry.TerrainModifier; +pub const SurfaceBlocks = biome_registry.SurfaceBlocks; +pub const BiomeDefinition = biome_registry.BiomeDefinition; +pub const ClimateParams = biome_registry.ClimateParams; +pub const BiomeId = biome_registry.BiomeId; +pub const BiomePoint = biome_registry.BiomePoint; +pub const StructuralParams = biome_registry.StructuralParams; +pub const TreeType = biome_registry.TreeType; // ============================================================================ -// Biome Registry - All biome 
definitions +// Constants from biome_registry.zig // ============================================================================ -pub const BIOME_REGISTRY: []const BiomeDefinition = &.{ - // === Ocean Biomes === - .{ - .id = .deep_ocean, - .name = "Deep Ocean", - .temperature = Range.any(), - .humidity = Range.any(), - .elevation = .{ .min = 0.0, .max = 0.25 }, - .continentalness = .{ .min = 0.0, .max = 0.20 }, - .priority = 2, - .surface = .{ .top = .gravel, .filler = .gravel, .depth_range = 4 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, - .colors = .{ .water = .{ 0.1, 0.2, 0.5 } }, - }, - .{ - .id = .ocean, - .name = "Ocean", - .temperature = Range.any(), - .humidity = Range.any(), - .elevation = .{ .min = 0.0, .max = 0.30 }, - .continentalness = .{ .min = 0.0, .max = 0.35 }, - .priority = 1, - .surface = .{ .top = .sand, .filler = .sand, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, - }, - .{ - .id = .beach, - .name = "Beach", - .temperature = .{ .min = 0.2, .max = 1.0 }, - .humidity = Range.any(), - .elevation = .{ .min = 0.28, .max = 0.38 }, - .continentalness = .{ .min = 0.35, .max = 0.42 }, // NARROW beach band - .max_slope = 2, - .priority = 10, - .surface = .{ .top = .sand, .filler = .sand, .depth_range = 2 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, - }, - - // === Land Biomes (continentalness > 0.45) === - .{ - .id = .plains, - .name = "Plains", - .temperature = Range.any(), - .humidity = Range.any(), - .elevation = .{ .min = 0.25, .max = 0.70 }, - .continentalness = .{ .min = 0.45, .max = 1.0 }, - .ruggedness = Range.any(), - .priority = 0, // Fallback - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.sparse_oak}, .tree_density = 0.02, .grass_density = 0.3 }, - .terrain = .{ .height_amplitude = 0.7, .smoothing = 0.2 }, - }, - .{ - .id = .forest, - .name = "Forest", - .temperature = .{ .min = 0.35, .max = 0.75 }, - .humidity = .{ 
.min = 0.40, .max = 1.0 }, - .elevation = .{ .min = 0.25, .max = 0.70 }, - .continentalness = .{ .min = 0.45, .max = 1.0 }, - .ruggedness = .{ .min = 0.0, .max = 0.60 }, - .priority = 5, - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{ .oak, .birch, .dense_oak }, .tree_density = 0.12, .bush_density = 0.05, .grass_density = 0.4 }, - .colors = .{ .grass = .{ 0.25, 0.55, 0.18 }, .foliage = .{ 0.18, 0.45, 0.12 } }, - }, - .{ - .id = .taiga, - .name = "Taiga", - .temperature = .{ .min = 0.15, .max = 0.45 }, - .humidity = .{ .min = 0.30, .max = 0.90 }, - .elevation = .{ .min = 0.25, .max = 0.75 }, - .continentalness = .{ .min = 0.45, .max = 1.0 }, - .priority = 6, - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.spruce}, .tree_density = 0.10, .grass_density = 0.2 }, - .colors = .{ .grass = .{ 0.35, 0.55, 0.25 }, .foliage = .{ 0.28, 0.48, 0.20 } }, - }, - .{ - .id = .desert, - .name = "Desert", - .temperature = .{ .min = 0.80, .max = 1.0 }, // Very hot - .humidity = .{ .min = 0.0, .max = 0.20 }, // Very dry - .elevation = .{ .min = 0.35, .max = 0.60 }, - .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland - .ruggedness = .{ .min = 0.0, .max = 0.35 }, - .max_height = 90, - .max_slope = 4, - .priority = 6, - .surface = .{ .top = .sand, .filler = .sand, .depth_range = 6 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0, .cactus_density = 0.015, .dead_bush_density = 0.02 }, - .terrain = .{ .height_amplitude = 0.5, .smoothing = 0.4 }, - .colors = .{ .grass = .{ 0.75, 0.70, 0.35 } }, - }, - .{ - .id = .swamp, - .name = "Swamp", - .temperature = .{ .min = 0.50, .max = 0.80 }, - .humidity = .{ .min = 0.70, .max = 1.0 }, - .elevation = .{ .min = 0.28, .max = 0.40 }, - .continentalness = .{ .min = 0.55, .max = 0.75 }, // Coastal to mid-inland - .ruggedness = .{ .min = 0.0, .max = 0.30 }, - .max_slope = 3, - .priority = 5, - .surface = .{ .top = .grass, 
.filler = .dirt, .depth_range = 2 }, - .vegetation = .{ .tree_types = &.{.swamp_oak}, .tree_density = 0.08 }, - .terrain = .{ .clamp_to_sea_level = true, .height_offset = -2 }, - .colors = .{ - .grass = .{ 0.35, 0.45, 0.25 }, - .foliage = .{ 0.30, 0.40, 0.20 }, - .water = .{ 0.25, 0.35, 0.30 }, - }, - }, - .{ - .id = .snow_tundra, - .name = "Snow Tundra", - .temperature = .{ .min = 0.0, .max = 0.25 }, - .humidity = Range.any(), - .elevation = .{ .min = 0.30, .max = 0.70 }, - .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland - .min_height = 70, - .max_slope = 255, - .priority = 4, - .surface = .{ .top = .snow_block, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.spruce}, .tree_density = 0.01 }, - .colors = .{ .grass = .{ 0.7, 0.75, 0.8 } }, - }, - - // === Mountain Biomes (continentalness > 0.75) === - .{ - .id = .mountains, - .name = "Mountains", - .temperature = .{ .min = 0.25, .max = 1.0 }, - .humidity = Range.any(), - .elevation = .{ .min = 0.58, .max = 1.0 }, - .continentalness = .{ .min = 0.75, .max = 1.0 }, // Must be inland high or core - .ruggedness = .{ .min = 0.60, .max = 1.0 }, - .min_height = 90, - .min_ridge_mask = 0.1, - .priority = 2, - .surface = .{ .top = .stone, .filler = .stone, .depth_range = 1 }, - .vegetation = .{ .tree_types = &.{.sparse_oak}, .tree_density = 0 }, - .terrain = .{ .height_amplitude = 1.5 }, - }, - .{ - .id = .snowy_mountains, - .name = "Snowy Mountains", - .temperature = .{ .min = 0.0, .max = 0.35 }, - .humidity = Range.any(), - .elevation = .{ .min = 0.58, .max = 1.0 }, - .continentalness = .{ .min = 0.75, .max = 1.0 }, - .ruggedness = .{ .min = 0.55, .max = 1.0 }, - .min_height = 110, - .max_slope = 255, - .priority = 2, - .surface = .{ .top = .snow_block, .filler = .stone, .depth_range = 1 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, - .terrain = .{ .height_amplitude = 1.4 }, - .colors = .{ .grass = .{ 0.85, 0.90, 0.95 } }, - }, - - // === Special Biomes === - .{ - .id = 
.mangrove_swamp, - .name = "Mangrove Swamp", - .temperature = .{ .min = 0.7, .max = 0.9 }, - .humidity = .{ .min = 0.8, .max = 1.0 }, - .elevation = .{ .min = 0.2, .max = 0.4 }, - .continentalness = .{ .min = 0.45, .max = 0.60 }, // Coastal swamp - .priority = 6, - .surface = .{ .top = .mud, .filler = .mud, .depth_range = 4 }, - .vegetation = .{ .tree_types = &.{.mangrove}, .tree_density = 0.15 }, - .terrain = .{ .clamp_to_sea_level = true, .height_offset = -1 }, - .colors = .{ .grass = .{ 0.4, 0.5, 0.2 }, .foliage = .{ 0.4, 0.5, 0.2 }, .water = .{ 0.2, 0.4, 0.3 } }, - }, - .{ - .id = .jungle, - .name = "Jungle", - .temperature = .{ .min = 0.75, .max = 1.0 }, - .humidity = .{ .min = 0.7, .max = 1.0 }, - .elevation = .{ .min = 0.30, .max = 0.75 }, - .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland - .priority = 5, - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.jungle}, .tree_density = 0.20, .bamboo_density = 0.08, .melon_density = 0.04 }, - .colors = .{ .grass = .{ 0.2, 0.8, 0.1 }, .foliage = .{ 0.1, 0.7, 0.1 } }, - }, - .{ - .id = .savanna, - .name = "Savanna", - .temperature = .{ .min = 0.65, .max = 1.0 }, // Hot climates - .humidity = .{ .min = 0.20, .max = 0.50 }, // Wider range - moderately dry - .elevation = .{ .min = 0.30, .max = 0.65 }, - .continentalness = .{ .min = 0.55, .max = 1.0 }, // Inland (less restrictive) - .priority = 5, // Higher priority to win over plains in hot zones - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.acacia}, .tree_density = 0.015, .grass_density = 0.5, .dead_bush_density = 0.01 }, - .colors = .{ .grass = .{ 0.55, 0.55, 0.30 }, .foliage = .{ 0.50, 0.50, 0.28 } }, - }, - .{ - .id = .badlands, - .name = "Badlands", - .temperature = .{ .min = 0.7, .max = 1.0 }, - .humidity = .{ .min = 0.0, .max = 0.3 }, - .elevation = .{ .min = 0.4, .max = 0.8 }, - .continentalness = .{ .min = 0.70, .max = 1.0 }, // Deep 
inland - .ruggedness = .{ .min = 0.4, .max = 1.0 }, - .priority = 6, - .surface = .{ .top = .red_sand, .filler = .terracotta, .depth_range = 5 }, - .vegetation = .{ .cactus_density = 0.02 }, - .colors = .{ .grass = .{ 0.5, 0.4, 0.3 } }, - }, - .{ - .id = .mushroom_fields, - .name = "Mushroom Fields", - .temperature = .{ .min = 0.4, .max = 0.7 }, - .humidity = .{ .min = 0.7, .max = 1.0 }, - .continentalness = .{ .min = 0.0, .max = 0.15 }, // Deep ocean islands only - .max_height = 50, - .priority = 20, - .surface = .{ .top = .mycelium, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{ .huge_red_mushroom, .huge_brown_mushroom }, .tree_density = 0.05, .red_mushroom_density = 0.1, .brown_mushroom_density = 0.1 }, - .colors = .{ .grass = .{ 0.4, 0.8, 0.4 } }, - }, - .{ - .id = .river, - .name = "River", - .temperature = Range.any(), - .humidity = Range.any(), - .elevation = .{ .min = 0.0, .max = 0.35 }, - // River should NEVER win normal biome scoring - impossible range - .continentalness = .{ .min = -1.0, .max = -0.5 }, - .priority = 15, - .surface = .{ .top = .sand, .filler = .sand, .depth_range = 2 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, - }, - - // === Transition Micro-Biomes === - // These should NEVER win natural climate selection. - // They are ONLY injected by edge detection (Issue #102). - // Use impossible continental ranges so they can't match naturally. 
- .{ - .id = .foothills, - .name = "Foothills", - .temperature = .{ .min = 0.20, .max = 0.90 }, - .humidity = Range.any(), - .elevation = .{ .min = 0.25, .max = 0.65 }, - .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only - .ruggedness = .{ .min = 0.30, .max = 0.80 }, - .priority = 0, // Lowest priority - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{ .sparse_oak, .spruce }, .tree_density = 0.08, .grass_density = 0.4 }, - .terrain = .{ .height_amplitude = 1.1, .smoothing = 0.1 }, - .colors = .{ .grass = .{ 0.35, 0.60, 0.25 } }, - }, - .{ - .id = .marsh, - .name = "Marsh", - .temperature = .{ .min = 0.40, .max = 0.75 }, - .humidity = .{ .min = 0.55, .max = 0.80 }, - .elevation = .{ .min = 0.28, .max = 0.42 }, - .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only - .ruggedness = .{ .min = 0.0, .max = 0.30 }, - .priority = 0, // Lowest priority - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 2 }, - .vegetation = .{ .tree_types = &.{.swamp_oak}, .tree_density = 0.04, .grass_density = 0.5 }, - .terrain = .{ .height_offset = -1, .smoothing = 0.3 }, - .colors = .{ - .grass = .{ 0.30, 0.50, 0.22 }, - .foliage = .{ 0.25, 0.45, 0.18 }, - .water = .{ 0.22, 0.38, 0.35 }, - }, - }, - .{ - .id = .dry_plains, - .name = "Dry Plains", - .temperature = .{ .min = 0.60, .max = 0.85 }, - .humidity = .{ .min = 0.20, .max = 0.40 }, - .elevation = .{ .min = 0.32, .max = 0.58 }, - .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only - .ruggedness = .{ .min = 0.0, .max = 0.40 }, - .priority = 0, // Lowest priority - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{.acacia}, .tree_density = 0.005, .grass_density = 0.3, .dead_bush_density = 0.02 }, - .terrain = .{ .height_amplitude = 0.6, .smoothing = 0.25 }, - .colors = .{ .grass = .{ 0.55, 0.50, 0.28 } }, // Less yellow, 
more natural - }, - .{ - .id = .coastal_plains, - .name = "Coastal Plains", - .temperature = .{ .min = 0.30, .max = 0.80 }, - .humidity = .{ .min = 0.30, .max = 0.70 }, - .elevation = .{ .min = 0.28, .max = 0.45 }, - .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only - .ruggedness = .{ .min = 0.0, .max = 0.35 }, - .priority = 0, // Lowest priority - .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, - .vegetation = .{ .tree_types = &.{}, .tree_density = 0, .grass_density = 0.4 }, // No trees - .terrain = .{ .height_amplitude = 0.5, .smoothing = 0.3 }, - .colors = .{ .grass = .{ 0.35, 0.60, 0.28 } }, - }, -}; +pub const BLEND_EPSILON = biome_registry.BLEND_EPSILON; +pub const BIOME_POINTS = biome_registry.BIOME_POINTS; +pub const BIOME_REGISTRY = biome_registry.BIOME_REGISTRY; // ============================================================================ -// Biome Selection Functions +// Functions from biome_registry.zig // ============================================================================ -/// Select the best matching biome for given climate parameters -pub fn selectBiome(params: ClimateParams) BiomeId { - var best_score: f32 = 0; - var best_biome: BiomeId = .plains; // Default fallback - - for (BIOME_REGISTRY) |biome| { - const s = biome.scoreClimate(params); - if (s > best_score) { - best_score = s; - best_biome = biome.id; - } - } - - return best_biome; -} - -/// Get the BiomeDefinition for a given BiomeId -pub fn getBiomeDefinition(id: BiomeId) *const BiomeDefinition { - for (BIOME_REGISTRY) |*biome| { - if (biome.id == id) return biome; - } - // All biomes in BiomeId enum must have a corresponding definition in BIOME_REGISTRY - unreachable; -} - -/// Select biome with river override -pub fn selectBiomeWithRiver(params: ClimateParams, river_mask: f32) BiomeId { - // River biome takes priority when river mask is active - if (river_mask > 0.5 and params.elevation < 0.35) { - return .river; - } - return 
selectBiome(params); -} - -/// Compute ClimateParams from raw generator values -pub fn computeClimateParams( - temperature: f32, - humidity: f32, - height: i32, - continentalness: f32, - erosion: f32, - sea_level: i32, - max_height: i32, -) ClimateParams { - // Normalize elevation: 0 = below sea, 0.3 = sea level, 1.0 = max height - // Use conditional to avoid integer overflow when height < sea_level - const height_above_sea: i32 = if (height > sea_level) height - sea_level else 0; - const elevation_range = max_height - sea_level; - const elevation = if (elevation_range > 0) - 0.3 + 0.7 * @as(f32, @floatFromInt(height_above_sea)) / @as(f32, @floatFromInt(elevation_range)) - else - 0.3; - - // For underwater: scale 0-0.3 - const final_elevation = if (height < sea_level) - 0.3 * @as(f32, @floatFromInt(@max(0, height))) / @as(f32, @floatFromInt(sea_level)) - else - elevation; - - return .{ - .temperature = temperature, - .humidity = humidity, - .elevation = @min(1.0, final_elevation), - .continentalness = continentalness, - .ruggedness = 1.0 - erosion, // Invert erosion: low erosion = high ruggedness - }; -} - -/// Result of blended biome selection -pub const BiomeSelection = struct { - primary: BiomeId, - secondary: BiomeId, - blend_factor: f32, // 0.0 = pure primary, up to 0.5 = mix of secondary - primary_score: f32, - secondary_score: f32, -}; - -/// Select top 2 biomes for blending -pub fn selectBiomeBlended(params: ClimateParams) BiomeSelection { - var best_score: f32 = 0.0; - var best_biome: ?BiomeId = null; - var second_score: f32 = 0.0; - var second_biome: ?BiomeId = null; - - for (BIOME_REGISTRY) |biome| { - const s = biome.scoreClimate(params); - if (s > best_score) { - second_score = best_score; - second_biome = best_biome; - best_score = s; - best_biome = biome.id; - } else if (s > second_score) { - second_score = s; - second_biome = biome.id; - } - } - - const primary = best_biome orelse .plains; - const secondary = second_biome orelse primary; - - var 
blend: f32 = 0.0; - const sum = best_score + second_score; - if (sum > BLEND_EPSILON) { - blend = second_score / sum; - } - - return .{ - .primary = primary, - .secondary = secondary, - .blend_factor = blend, - .primary_score = best_score, - .secondary_score = second_score, - }; -} - -/// Select blended biomes with river override -pub fn selectBiomeWithRiverBlended(params: ClimateParams, river_mask: f32) BiomeSelection { - const selection = selectBiomeBlended(params); - - // If distinctly river, override primary with blending - if (params.elevation < 0.35) { - const river_edge0 = 0.45; - const river_edge1 = 0.55; - - if (river_mask > river_edge0) { - const t = std.math.clamp((river_mask - river_edge0) / (river_edge1 - river_edge0), 0.0, 1.0); - const river_factor = t * t * (3.0 - 2.0 * t); - - // Blend towards river: - // river_factor = 1.0 -> Pure River - // river_factor = 0.0 -> Pure Land (selection.primary) - // We set Primary=River, Secondary=Land, Blend=(1-river_factor) - return .{ - .primary = .river, - .secondary = selection.primary, - .blend_factor = 1.0 - river_factor, - .primary_score = 1.0, // River wins - .secondary_score = selection.primary_score, - }; - } - } - return selection; -} - -/// Structural constraints for biome selection -pub const StructuralParams = struct { - height: i32, - slope: i32, - continentalness: f32, - ridge_mask: f32, -}; - -/// Select biome using Voronoi diagram in heat/humidity space (Issue #106) -/// Climate temperature/humidity are converted to heat/humidity scale (0-100) -/// Structural constraints (height, continentalness) filter eligible biomes -pub fn selectBiomeWithConstraints(climate: ClimateParams, structural: StructuralParams) BiomeId { - // Convert climate params to Voronoi heat/humidity scale (0-100) - // Temperature 0-1 -> Heat 0-100 - // Humidity 0-1 -> Humidity 0-100 - const heat = climate.temperature * 100.0; - const humidity = climate.humidity * 100.0; - - return selectBiomeVoronoi(heat, humidity, 
structural.height, structural.continentalness, structural.slope); -} - -/// Select biome with structural constraints and river override -pub fn selectBiomeWithConstraintsAndRiver(climate: ClimateParams, structural: StructuralParams, river_mask: f32) BiomeId { - // Convert climate params to Voronoi heat/humidity scale (0-100) - const heat = climate.temperature * 100.0; - const humidity = climate.humidity * 100.0; - - return selectBiomeVoronoiWithRiver(heat, humidity, structural.height, structural.continentalness, structural.slope, river_mask); -} +pub const getBiomeDefinition = biome_registry.getBiomeDefinition; // ============================================================================ -// LOD-optimized Biome Functions (Issue #114) +// Types from biome_edge_detector.zig // ============================================================================ -/// Simplified biome selection for LOD2+ (no structural constraints) -pub fn selectBiomeSimple(climate: ClimateParams) BiomeId { - const heat = climate.temperature * 100.0; - const humidity = climate.humidity * 100.0; - const continental = climate.continentalness; - - // Ocean check - if (continental < 0.35) { - if (continental < 0.20) return .deep_ocean; - return .ocean; - } - - // Simple land biome selection based on heat/humidity - if (heat < 20) { - return if (humidity > 50) .taiga else .snow_tundra; - } else if (heat < 40) { - return if (humidity > 60) .taiga else .plains; - } else if (heat < 60) { - return if (humidity > 70) .forest else .plains; - } else if (heat < 80) { - return if (humidity > 60) .jungle else if (humidity > 30) .savanna else .desert; - } else { - return if (humidity > 40) .badlands else .desert; - } -} - -/// Get biome color for LOD rendering (packed RGB) -/// Colors adjusted to match textured output (grass/surface colors) -pub fn getBiomeColor(biome_id: BiomeId) u32 { - return switch (biome_id) { - .deep_ocean => 0x1A3380, // Darker blue - .ocean => 0x3366CC, // Standard ocean blue - 
.beach => 0xDDBB88, // Sand color - .plains => 0x4D8033, // Darker grass green - .forest => 0x2D591A, // Darker forest green - .taiga => 0x476647, // Muted taiga green - .desert => 0xD4B36A, // Warm desert sand - .snow_tundra => 0xDDEEFF, // Snow - .mountains => 0x888888, // Stone grey - .snowy_mountains => 0xCCDDEE, // Snowy stone - .river => 0x4488CC, // River blue - .swamp => 0x334D33, // Dark swamp green - .mangrove_swamp => 0x264026, // Muted mangrove - .jungle => 0x1A661A, // Vibrant jungle green - .savanna => 0x8C8C4D, // Dry savanna green - .badlands => 0xAA6633, // Terracotta orange - .mushroom_fields => 0x995577, // Mycelium purple - .foothills => 0x597340, // Transitional green - .marsh => 0x405933, // Transitional wetland - .dry_plains => 0x8C8047, // Transitional dry plains - .coastal_plains => 0x598047, // Transitional coastal - }; -} +pub const EdgeBand = biome_edge_detector.EdgeBand; +pub const BiomeEdgeInfo = biome_edge_detector.BiomeEdgeInfo; +pub const TransitionRule = biome_edge_detector.TransitionRule; // ============================================================================ -// BiomeSource - Unified biome selection interface (Issue #147) +// Constants from biome_edge_detector.zig // ============================================================================ -/// Result of biome selection with blending information -pub const BiomeResult = struct { - primary: BiomeId, - secondary: BiomeId, // For blending (may be same as primary) - blend_factor: f32, // 0.0 = use primary, 1.0 = use secondary -}; - -/// Parameters for BiomeSource initialization -pub const BiomeSourceParams = struct { - sea_level: i32 = 64, - edge_detection_enabled: bool = true, - ocean_threshold: f32 = 0.35, -}; - -/// Unified biome selection interface. -/// -/// BiomeSource wraps all biome selection logic into a single, configurable -/// interface. 
This allows swapping biome selection behavior for different -/// dimensions (e.g., Overworld vs Nether) without modifying the generator. -/// -/// Part of Issue #147: Modularize Terrain Generation Pipeline -pub const BiomeSource = struct { - params: BiomeSourceParams, - - /// Initialize with default parameters - pub fn init() BiomeSource { - return initWithParams(.{}); - } +pub const EDGE_STEP = biome_edge_detector.EDGE_STEP; +pub const EDGE_CHECK_RADII = biome_edge_detector.EDGE_CHECK_RADII; +pub const EDGE_WIDTH = biome_edge_detector.EDGE_WIDTH; +pub const TRANSITION_RULES = biome_edge_detector.TRANSITION_RULES; - /// Initialize with custom parameters - pub fn initWithParams(params: BiomeSourceParams) BiomeSource { - return .{ .params = params }; - } - - /// Primary biome selection interface. - /// - /// Selects a biome based on climate and structural parameters, - /// with optional river override. - pub fn selectBiome( - self: *const BiomeSource, - climate: ClimateParams, - structural: StructuralParams, - river_mask: f32, - ) BiomeId { - _ = self; - return selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); - } - - /// Select biome with edge detection and transition biome injection. - /// - /// This is the full biome selection that includes checking for - /// biome boundaries and inserting appropriate transition biomes. 
- pub fn selectBiomeWithEdge( - self: *const BiomeSource, - climate: ClimateParams, - structural: StructuralParams, - river_mask: f32, - edge_info: BiomeEdgeInfo, - ) BiomeResult { - // First, get the base biome - const base_biome = self.selectBiome(climate, structural, river_mask); - - // If edge detection is disabled or no edge detected, return base - if (!self.params.edge_detection_enabled or edge_info.edge_band == .none) { - return .{ - .primary = base_biome, - .secondary = base_biome, - .blend_factor = 0.0, - }; - } +// ============================================================================ +// Functions from biome_edge_detector.zig +// ============================================================================ - // Check if transition is needed - if (edge_info.neighbor_biome) |neighbor| { - if (getTransitionBiome(base_biome, neighbor)) |transition| { - // Set blend factor based on edge band - const blend: f32 = switch (edge_info.edge_band) { - .inner => 0.3, // Closer to boundary: more original showing through - .middle => 0.2, - .outer => 0.1, - .none => 0.0, - }; - return .{ - .primary = transition, - .secondary = base_biome, - .blend_factor = blend, - }; - } - } +pub const needsTransition = biome_edge_detector.needsTransition; +pub const getTransitionBiome = biome_edge_detector.getTransitionBiome; - // No transition needed - return .{ - .primary = base_biome, - .secondary = base_biome, - .blend_factor = 0.0, - }; - } +// ============================================================================ +// Functions from biome_selector.zig +// ============================================================================ - /// Simplified biome selection for LOD levels - pub fn selectBiomeSimplified(self: *const BiomeSource, climate: ClimateParams) BiomeId { - _ = self; - return selectBiomeSimple(climate); - } +pub const selectBiomeVoronoi = biome_selector.selectBiomeVoronoi; +pub const selectBiomeVoronoiWithRiver = biome_selector.selectBiomeVoronoiWithRiver; 
+pub const selectBiome = biome_selector.selectBiome; +pub const selectBiomeWithRiver = biome_selector.selectBiomeWithRiver; +pub const computeClimateParams = biome_selector.computeClimateParams; +pub const BiomeSelection = biome_selector.BiomeSelection; +pub const selectBiomeBlended = biome_selector.selectBiomeBlended; +pub const selectBiomeWithRiverBlended = biome_selector.selectBiomeWithRiverBlended; +pub const selectBiomeWithConstraints = biome_selector.selectBiomeWithConstraints; +pub const selectBiomeWithConstraintsAndRiver = biome_selector.selectBiomeWithConstraintsAndRiver; +pub const selectBiomeSimple = biome_selector.selectBiomeSimple; - /// Check if a position is ocean based on continentalness - pub fn isOcean(self: *const BiomeSource, continentalness: f32) bool { - return continentalness < self.params.ocean_threshold; - } +// ============================================================================ +// Functions from biome_color_provider.zig +// ============================================================================ - /// Get the biome definition for a biome ID - pub fn getDefinition(_: *const BiomeSource, biome_id: BiomeId) BiomeDefinition { - return getBiomeDefinition(biome_id); - } +pub const getBiomeColor = biome_color_provider.getBiomeColor; - /// Get biome color for rendering - pub fn getColor(_: *const BiomeSource, biome_id: BiomeId) u32 { - return getBiomeColor(biome_id); - } +// ============================================================================ +// Types from biome_source.zig +// ============================================================================ - /// Compute climate parameters from raw values - pub fn computeClimate( - self: *const BiomeSource, - temperature: f32, - humidity: f32, - terrain_height: i32, - continentalness: f32, - erosion: f32, - max_height: i32, - ) ClimateParams { - return computeClimateParams( - temperature, - humidity, - terrain_height, - continentalness, - erosion, - self.params.sea_level, - 
max_height, - ); - } -}; +pub const BiomeResult = biome_source_mod.BiomeResult; +pub const BiomeSourceParams = biome_source_mod.BiomeSourceParams; +pub const BiomeSource = biome_source_mod.BiomeSource; diff --git a/src/world/worldgen/biome_color_provider.zig b/src/world/worldgen/biome_color_provider.zig new file mode 100644 index 00000000..ae1320a0 --- /dev/null +++ b/src/world/worldgen/biome_color_provider.zig @@ -0,0 +1,31 @@ +//! Biome color lookup for LOD rendering and minimap. + +const BiomeId = @import("biome_registry.zig").BiomeId; + +/// Get biome color for LOD rendering (packed RGB) +/// Colors adjusted to match textured output (grass/surface colors) +pub fn getBiomeColor(biome_id: BiomeId) u32 { + return switch (biome_id) { + .deep_ocean => 0x1A3380, // Darker blue + .ocean => 0x3366CC, // Standard ocean blue + .beach => 0xDDBB88, // Sand color + .plains => 0x4D8033, // Darker grass green + .forest => 0x2D591A, // Darker forest green + .taiga => 0x476647, // Muted taiga green + .desert => 0xD4B36A, // Warm desert sand + .snow_tundra => 0xDDEEFF, // Snow + .mountains => 0x888888, // Stone grey + .snowy_mountains => 0xCCDDEE, // Snowy stone + .river => 0x4488CC, // River blue + .swamp => 0x334D33, // Dark swamp green + .mangrove_swamp => 0x264026, // Muted mangrove + .jungle => 0x1A661A, // Vibrant jungle green + .savanna => 0x8C8C4D, // Dry savanna green + .badlands => 0xAA6633, // Terracotta orange + .mushroom_fields => 0x995577, // Mycelium purple + .foothills => 0x597340, // Transitional green + .marsh => 0x405933, // Transitional wetland + .dry_plains => 0x8C8047, // Transitional dry plains + .coastal_plains => 0x598047, // Transitional coastal + }; +} diff --git a/src/world/worldgen/biome_decorator.zig b/src/world/worldgen/biome_decorator.zig new file mode 100644 index 00000000..16573163 --- /dev/null +++ b/src/world/worldgen/biome_decorator.zig @@ -0,0 +1,98 @@ +const std = @import("std"); +const region_pkg = @import("region.zig"); +const 
DecorationProvider = @import("decoration_provider.zig").DecorationProvider; +const NoiseSampler = @import("noise_sampler.zig").NoiseSampler; +const Chunk = @import("../chunk.zig").Chunk; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; +const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const BlockType = @import("../block.zig").BlockType; + +/// Biome decoration subsystem. +/// Handles post-terrain passes: ores and biome features/vegetation. +pub const BiomeDecorator = struct { + decoration_provider: DecorationProvider, + ore_seed: u64, + region_seed: u64, + + pub fn init(seed: u64, decoration_provider: DecorationProvider) BiomeDecorator { + return .{ + .decoration_provider = decoration_provider, + .ore_seed = seed +% 30, + .region_seed = seed +% 20, + }; + } + + pub fn generateOres(self: *const BiomeDecorator, chunk: *Chunk) void { + var prng = std.Random.DefaultPrng.init(self.ore_seed +% @as(u64, @bitCast(@as(i64, chunk.chunk_x))) *% 59381 +% @as(u64, @bitCast(@as(i64, chunk.chunk_z))) *% 28411); + const random = prng.random(); + placeOreVeins(chunk, .coal_ore, 20, 6, 10, 128, random); + placeOreVeins(chunk, .iron_ore, 10, 4, 5, 64, random); + placeOreVeins(chunk, .gold_ore, 3, 3, 2, 32, random); + placeOreVeins(chunk, .glowstone, 8, 4, 5, 40, random); + } + + fn placeOreVeins(chunk: *Chunk, block: BlockType, count: u32, size: u32, min_y: i32, max_y: i32, random: std.Random) void { + for (0..count) |_| { + const cx = random.uintLessThan(u32, CHUNK_SIZE_X); + const cz = random.uintLessThan(u32, CHUNK_SIZE_Z); + const range = max_y - min_y; + if (range <= 0) continue; + const cy = min_y + @as(i32, @intCast(random.uintLessThan(u32, @intCast(range)))); + const vein_size = random.uintLessThan(u32, size) + 2; + var i: u32 = 0; + while (i < vein_size) : (i += 1) { + const ox = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 2; + const oy = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 
2; + const oz = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 2; + const tx = @as(i32, @intCast(cx)) + ox; + const ty = cy + oy; + const tz = @as(i32, @intCast(cz)) + oz; + if (chunk.getBlockSafe(tx, ty, tz) == .stone) { + if (tx >= 0 and tx < CHUNK_SIZE_X and ty >= 0 and ty < CHUNK_SIZE_Y and tz >= 0 and tz < CHUNK_SIZE_Z) { + chunk.setBlock(@intCast(tx), @intCast(ty), @intCast(tz), block); + } + } + } + } + } + + pub fn generateFeatures(self: *const BiomeDecorator, chunk: *Chunk, noise_sampler: *const NoiseSampler) void { + var prng = std.Random.DefaultPrng.init(self.region_seed ^ @as(u64, @bitCast(@as(i64, chunk.chunk_x))) ^ (@as(u64, @bitCast(@as(i64, chunk.chunk_z))) << 32)); + const random = prng.random(); + + const wx_center = chunk.getWorldX() + 8; + const wz_center = chunk.getWorldZ() + 8; + const region = region_pkg.getRegion(self.region_seed, wx_center, wz_center); + const veg_mult = region_pkg.getVegetationMultiplier(region); + const allow_subbiomes = region_pkg.allowSubBiomes(region); + + var local_z: u32 = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const surface_y = chunk.getSurfaceHeight(local_x, local_z); + if (surface_y <= 0 or surface_y >= CHUNK_SIZE_Y - 1) continue; + + const biome = chunk.biomes[local_x + local_z * CHUNK_SIZE_X]; + const wx: f32 = @floatFromInt(chunk.getWorldX() + @as(i32, @intCast(local_x))); + const wz: f32 = @floatFromInt(chunk.getWorldZ() + @as(i32, @intCast(local_z))); + const variant_val = noise_sampler.variant_noise.get2D(wx, wz); + const surface_block = chunk.getBlock(local_x, @intCast(surface_y), local_z); + + self.decoration_provider.decorate(.{ + .chunk = chunk, + .local_x = local_x, + .local_z = local_z, + .surface_y = @intCast(surface_y), + .surface_block = surface_block, + .biome = biome, + .variant = variant_val, + .allow_subbiomes = allow_subbiomes, + .veg_mult = veg_mult, + .random = random, + }); + } + } + } +}; diff 
--git a/src/world/worldgen/biome_edge_detector.zig b/src/world/worldgen/biome_edge_detector.zig new file mode 100644 index 00000000..f4fbcdde --- /dev/null +++ b/src/world/worldgen/biome_edge_detector.zig @@ -0,0 +1,86 @@ +//! Edge detection types, transition rules, and boundary logic. +//! Determines when biome transitions are needed and which transition biome to use. + +const BiomeId = @import("biome_registry.zig").BiomeId; + +// ============================================================================ +// Edge Detection Types and Constants (Issue #102) +// ============================================================================ + +/// Sampling step for edge detection (every N blocks) +pub const EDGE_STEP: u32 = 4; + +/// Radii to check for neighboring biomes (in world blocks) +pub const EDGE_CHECK_RADII = [_]u32{ 4, 8, 12 }; + +/// Target width of transition bands (blocks) +pub const EDGE_WIDTH: u32 = 8; + +/// Represents proximity to a biome boundary +pub const EdgeBand = enum(u2) { + none = 0, // No edge detected + outer = 1, // 8-12 blocks from boundary + middle = 2, // 4-8 blocks from boundary + inner = 3, // 0-4 blocks from boundary +}; + +/// Information about biome edge detection result +pub const BiomeEdgeInfo = struct { + base_biome: BiomeId, + neighbor_biome: ?BiomeId, // Different biome if edge detected + edge_band: EdgeBand, +}; + +/// Rule defining which biome pairs need a transition zone +pub const TransitionRule = struct { + biome_a: BiomeId, + biome_b: BiomeId, + transition: BiomeId, +}; + +/// Biome adjacency rules - pairs that need buffer biomes between them +pub const TRANSITION_RULES = [_]TransitionRule{ + // Hot/dry <-> Temperate + .{ .biome_a = .desert, .biome_b = .forest, .transition = .dry_plains }, + .{ .biome_a = .desert, .biome_b = .plains, .transition = .dry_plains }, + .{ .biome_a = .desert, .biome_b = .taiga, .transition = .dry_plains }, + .{ .biome_a = .desert, .biome_b = .jungle, .transition = .savanna }, + + // Cold <-> 
Temperate + .{ .biome_a = .snow_tundra, .biome_b = .plains, .transition = .taiga }, + .{ .biome_a = .snow_tundra, .biome_b = .forest, .transition = .taiga }, + + // Wetland <-> Forest + .{ .biome_a = .swamp, .biome_b = .forest, .transition = .marsh }, + .{ .biome_a = .swamp, .biome_b = .plains, .transition = .marsh }, + + // Mountain <-> Lowland + .{ .biome_a = .mountains, .biome_b = .plains, .transition = .foothills }, + .{ .biome_a = .mountains, .biome_b = .forest, .transition = .foothills }, + .{ .biome_a = .snowy_mountains, .biome_b = .taiga, .transition = .foothills }, + .{ .biome_a = .snowy_mountains, .biome_b = .snow_tundra, .transition = .foothills }, +}; + +/// Check if two biomes need a transition zone between them +pub fn needsTransition(a: BiomeId, b: BiomeId) bool { + for (TRANSITION_RULES) |rule| { + if ((rule.biome_a == a and rule.biome_b == b) or + (rule.biome_a == b and rule.biome_b == a)) + { + return true; + } + } + return false; +} + +/// Get the transition biome for a pair of biomes, if one is defined +pub fn getTransitionBiome(a: BiomeId, b: BiomeId) ?BiomeId { + for (TRANSITION_RULES) |rule| { + if ((rule.biome_a == a and rule.biome_b == b) or + (rule.biome_a == b and rule.biome_b == a)) + { + return rule.transition; + } + } + return null; +} diff --git a/src/world/worldgen/biome_registry.zig b/src/world/worldgen/biome_registry.zig new file mode 100644 index 00000000..7318a36e --- /dev/null +++ b/src/world/worldgen/biome_registry.zig @@ -0,0 +1,572 @@ +//! Biome data definitions, type declarations, and registry. +//! This module is the leaf dependency for the biome subsystem — it has no +//! imports from other biome_* modules. 
+ +const std = @import("std"); +const BlockType = @import("../block.zig").BlockType; +const tree_registry = @import("tree_registry.zig"); +pub const TreeType = tree_registry.TreeType; + +/// Minimum sum threshold for biome blend calculation to avoid division by near-zero values +pub const BLEND_EPSILON: f32 = 0.0001; + +/// Represents a range of values for biome parameter matching +pub const Range = struct { + min: f32, + max: f32, + + /// Check if a value falls within this range + pub fn contains(self: Range, value: f32) bool { + return value >= self.min and value <= self.max; + } + + /// Get normalized distance from center (0 = at center, 1 = at edge) + pub fn distanceFromCenter(self: Range, value: f32) f32 { + const center = (self.min + self.max) * 0.5; + const half_width = (self.max - self.min) * 0.5; + if (half_width <= 0) return if (value == center) 0 else 1; + return @min(1.0, @abs(value - center) / half_width); + } + + /// Convenience for "any value" + pub fn any() Range { + return .{ .min = 0.0, .max = 1.0 }; + } +}; + +/// Color tints for visual biome identity (RGB 0-1) +pub const ColorTints = struct { + grass: [3]f32 = .{ 0.3, 0.65, 0.2 }, // Default green + foliage: [3]f32 = .{ 0.2, 0.5, 0.15 }, + water: [3]f32 = .{ 0.2, 0.4, 0.8 }, +}; + +/// Vegetation profile for biome-driven placement +pub const VegetationProfile = struct { + tree_types: []const TreeType = &.{.oak}, + tree_density: f32 = 0.05, // Probability per attempt + bush_density: f32 = 0.0, + grass_density: f32 = 0.0, + cactus_density: f32 = 0.0, + dead_bush_density: f32 = 0.0, + bamboo_density: f32 = 0.0, + melon_density: f32 = 0.0, + red_mushroom_density: f32 = 0.0, + brown_mushroom_density: f32 = 0.0, +}; + +/// Terrain modifiers applied during height computation +pub const TerrainModifier = struct { + /// Multiplier for hill/mountain amplitude (1.0 = normal) + height_amplitude: f32 = 1.0, + /// How much to smooth/flatten terrain (0 = no change, 1 = fully flat) + smoothing: f32 = 0.0, + /// 
Clamp height near sea level (for swamps) + clamp_to_sea_level: bool = false, + /// Additional height offset + height_offset: f32 = 0.0, +}; + +/// Surface block configuration +pub const SurfaceBlocks = struct { + top: BlockType = .grass, + filler: BlockType = .dirt, + depth_range: i32 = 3, +}; + +/// Complete biome definition - data-driven and extensible +pub const BiomeDefinition = struct { + id: BiomeId, + name: []const u8, + + // Parameter ranges for selection + temperature: Range, + humidity: Range, + elevation: Range = Range.any(), + continentalness: Range = Range.any(), + ruggedness: Range = Range.any(), + + // Structural constraints - terrain structure determines biome eligibility + min_height: i32 = 0, // Minimum absolute height (blocks from y=0) + max_height: i32 = 256, // Maximum absolute height + max_slope: i32 = 255, // Maximum allowed slope in blocks (0 = flat) + min_ridge_mask: f32 = 0.0, // Minimum ridge mask value + max_ridge_mask: f32 = 1.0, // Maximum ridge mask value + + // Selection tuning + priority: i32 = 0, // Higher priority wins ties + blend_weight: f32 = 1.0, // For future blending + + // Biome properties + surface: SurfaceBlocks = .{}, + vegetation: VegetationProfile = .{}, + terrain: TerrainModifier = .{}, + colors: ColorTints = .{}, + + /// Check if biome meets structural constraints (height, slope, continentalness, ridge) + pub fn meetsStructuralConstraints(self: BiomeDefinition, height: i32, slope: i32, continentalness: f32, ridge_mask: f32) bool { + if (height < self.min_height) return false; + if (height > self.max_height) return false; + if (slope > self.max_slope) return false; + if (!self.continentalness.contains(continentalness)) return false; + if (ridge_mask < self.min_ridge_mask or ridge_mask > self.max_ridge_mask) return false; + return true; + } + + /// Score how well this biome matches the given climate parameters + /// Only temperature, humidity, and elevation affect the score (structural already filtered) + pub fn 
scoreClimate(self: BiomeDefinition, params: ClimateParams) f32 { + // Check if within climate ranges + if (!self.temperature.contains(params.temperature)) return 0; + if (!self.humidity.contains(params.humidity)) return 0; + if (!self.elevation.contains(params.elevation)) return 0; + + // Compute weighted distance from ideal center + const t_dist = self.temperature.distanceFromCenter(params.temperature); + const h_dist = self.humidity.distanceFromCenter(params.humidity); + const e_dist = self.elevation.distanceFromCenter(params.elevation); + + // Average distance (lower is better) + const avg_dist = (t_dist + h_dist + e_dist) / 3.0; + + // Convert to score (higher is better), add priority bonus + return (1.0 - avg_dist) + @as(f32, @floatFromInt(self.priority)) * 0.01; + } +}; + +/// Climate parameters computed per (x,z) column +pub const ClimateParams = struct { + temperature: f32, // 0=cold, 1=hot (altitude-adjusted) + humidity: f32, // 0=dry, 1=wet + elevation: f32, // Normalized: 0=sea level, 1=max height + continentalness: f32, // 0=deep ocean, 1=deep inland + ruggedness: f32, // 0=smooth, 1=mountainous (erosion inverted) +}; + +/// Biome identifiers - matches existing enum in block.zig +/// Per worldgen-revamp.md Section 4.3: Add transition micro-biomes +pub const BiomeId = enum(u8) { + deep_ocean = 0, + ocean = 1, + beach = 2, + plains = 3, + forest = 4, + taiga = 5, + desert = 6, + snow_tundra = 7, + mountains = 8, + snowy_mountains = 9, + river = 10, + swamp = 11, // New biome from spec + mangrove_swamp = 12, + jungle = 13, + savanna = 14, + badlands = 15, + mushroom_fields = 16, + // Per worldgen-revamp.md Section 4.3: Transition micro-biomes + foothills = 17, // Plains <-> Mountains transition + marsh = 18, // Forest <-> Swamp transition + dry_plains = 19, // Desert <-> Forest/Plains transition + coastal_plains = 20, // Coastal no-tree zone +}; + +/// Voronoi point defining a biome's position in climate space +/// Biomes are selected by finding the 
closest point to the sampled heat/humidity +pub const BiomePoint = struct { + id: BiomeId, + heat: f32, // 0-100 scale (cold to hot) + humidity: f32, // 0-100 scale (dry to wet) + weight: f32 = 1.0, // Cell size multiplier (larger = bigger biome regions) + y_min: i32 = 0, // Minimum Y level + y_max: i32 = 256, // Maximum Y level + /// Maximum allowed slope in blocks (0 = flat, 255 = vertical cliff) + max_slope: i32 = 255, + /// Minimum continentalness (0-1). Set > 0.35 for land-only biomes + min_continental: f32 = 0.0, + /// Maximum continentalness. Set < 0.35 for ocean-only biomes + max_continental: f32 = 1.0, +}; + +/// Structural constraints for biome selection +pub const StructuralParams = struct { + height: i32, + slope: i32, + continentalness: f32, + ridge_mask: f32, +}; + +// ============================================================================ +// Voronoi Biome Points (Issue #106) +// ============================================================================ + +/// Voronoi biome points - defines where each biome sits in heat/humidity space +/// Heat: 0=frozen, 50=temperate, 100=scorching +/// Humidity: 0=arid, 50=normal, 100=saturated +pub const BIOME_POINTS = [_]BiomePoint{ + // === Ocean Biomes (continental < 0.35) === + .{ .id = .deep_ocean, .heat = 50, .humidity = 50, .weight = 1.5, .max_continental = 0.20 }, + .{ .id = .ocean, .heat = 50, .humidity = 50, .weight = 1.5, .min_continental = 0.20, .max_continental = 0.35 }, + + // === Coastal Biomes === + .{ .id = .beach, .heat = 60, .humidity = 50, .weight = 0.6, .max_slope = 2, .min_continental = 0.35, .max_continental = 0.42, .y_max = 70 }, + + // === Cold Biomes === + .{ .id = .snow_tundra, .heat = 5, .humidity = 30, .weight = 1.0, .min_continental = 0.42 }, + .{ .id = .taiga, .heat = 20, .humidity = 60, .weight = 1.0, .min_continental = 0.42 }, + .{ .id = .snowy_mountains, .heat = 10, .humidity = 40, .weight = 0.8, .min_continental = 0.60, .y_min = 100 }, + + // === Temperate Biomes === + .{ 
.id = .plains, .heat = 50, .humidity = 45, .weight = 1.5, .min_continental = 0.42 }, // Large weight = common + .{ .id = .forest, .heat = 45, .humidity = 65, .weight = 1.2, .min_continental = 0.42 }, + .{ .id = .mountains, .heat = 40, .humidity = 50, .weight = 0.8, .min_continental = 0.60, .y_min = 90 }, + + // === Warm/Wet Biomes === + .{ .id = .swamp, .heat = 65, .humidity = 85, .weight = 0.8, .max_slope = 3, .min_continental = 0.42, .y_max = 72 }, + .{ .id = .mangrove_swamp, .heat = 75, .humidity = 90, .weight = 0.6, .max_slope = 3, .min_continental = 0.35, .max_continental = 0.50, .y_max = 68 }, + .{ .id = .jungle, .heat = 85, .humidity = 85, .weight = 0.9, .min_continental = 0.50 }, + + // === Hot/Dry Biomes === + .{ .id = .desert, .heat = 90, .humidity = 10, .weight = 1.2, .min_continental = 0.42, .y_max = 90 }, + .{ .id = .savanna, .heat = 80, .humidity = 30, .weight = 1.0, .min_continental = 0.42 }, + .{ .id = .badlands, .heat = 85, .humidity = 15, .weight = 0.7, .min_continental = 0.55 }, + + // === Special Biomes === + .{ .id = .mushroom_fields, .heat = 50, .humidity = 80, .weight = 0.3, .min_continental = 0.35, .max_continental = 0.45 }, + .{ .id = .river, .heat = 50, .humidity = 70, .weight = 0.4, .min_continental = 0.42 }, // Selected by river mask, not Voronoi + + // === Transition Biomes (created by edge detection, but need Voronoi fallback) === + // These have extreme positions so they're rarely selected directly + .{ .id = .foothills, .heat = 45, .humidity = 45, .weight = 0.5, .min_continental = 0.55, .y_min = 75, .y_max = 100 }, + .{ .id = .marsh, .heat = 55, .humidity = 78, .weight = 0.5, .min_continental = 0.42, .y_max = 68 }, + .{ .id = .dry_plains, .heat = 70, .humidity = 25, .weight = 0.6, .min_continental = 0.42 }, + .{ .id = .coastal_plains, .heat = 55, .humidity = 50, .weight = 0.5, .min_continental = 0.35, .max_continental = 0.48 }, +}; + +// ============================================================================ +// Biome Registry - 
All biome definitions +// ============================================================================ + +pub const BIOME_REGISTRY: []const BiomeDefinition = &.{ + // === Ocean Biomes === + .{ + .id = .deep_ocean, + .name = "Deep Ocean", + .temperature = Range.any(), + .humidity = Range.any(), + .elevation = .{ .min = 0.0, .max = 0.25 }, + .continentalness = .{ .min = 0.0, .max = 0.20 }, + .priority = 2, + .surface = .{ .top = .gravel, .filler = .gravel, .depth_range = 4 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, + .colors = .{ .water = .{ 0.1, 0.2, 0.5 } }, + }, + .{ + .id = .ocean, + .name = "Ocean", + .temperature = Range.any(), + .humidity = Range.any(), + .elevation = .{ .min = 0.0, .max = 0.30 }, + .continentalness = .{ .min = 0.0, .max = 0.35 }, + .priority = 1, + .surface = .{ .top = .sand, .filler = .sand, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, + }, + .{ + .id = .beach, + .name = "Beach", + .temperature = .{ .min = 0.2, .max = 1.0 }, + .humidity = Range.any(), + .elevation = .{ .min = 0.28, .max = 0.38 }, + .continentalness = .{ .min = 0.35, .max = 0.42 }, // NARROW beach band + .max_slope = 2, + .priority = 10, + .surface = .{ .top = .sand, .filler = .sand, .depth_range = 2 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, + }, + + // === Land Biomes (continentalness > 0.45) === + .{ + .id = .plains, + .name = "Plains", + .temperature = Range.any(), + .humidity = Range.any(), + .elevation = .{ .min = 0.25, .max = 0.70 }, + .continentalness = .{ .min = 0.45, .max = 1.0 }, + .ruggedness = Range.any(), + .priority = 0, // Fallback + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.sparse_oak}, .tree_density = 0.02, .grass_density = 0.3 }, + .terrain = .{ .height_amplitude = 0.7, .smoothing = 0.2 }, + }, + .{ + .id = .forest, + .name = "Forest", + .temperature = .{ .min = 0.35, .max = 0.75 }, + .humidity = .{ .min = 0.40, .max = 1.0 
}, + .elevation = .{ .min = 0.25, .max = 0.70 }, + .continentalness = .{ .min = 0.45, .max = 1.0 }, + .ruggedness = .{ .min = 0.0, .max = 0.60 }, + .priority = 5, + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{ .oak, .birch, .dense_oak }, .tree_density = 0.12, .bush_density = 0.05, .grass_density = 0.4 }, + .colors = .{ .grass = .{ 0.25, 0.55, 0.18 }, .foliage = .{ 0.18, 0.45, 0.12 } }, + }, + .{ + .id = .taiga, + .name = "Taiga", + .temperature = .{ .min = 0.15, .max = 0.45 }, + .humidity = .{ .min = 0.30, .max = 0.90 }, + .elevation = .{ .min = 0.25, .max = 0.75 }, + .continentalness = .{ .min = 0.45, .max = 1.0 }, + .priority = 6, + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.spruce}, .tree_density = 0.10, .grass_density = 0.2 }, + .colors = .{ .grass = .{ 0.35, 0.55, 0.25 }, .foliage = .{ 0.28, 0.48, 0.20 } }, + }, + .{ + .id = .desert, + .name = "Desert", + .temperature = .{ .min = 0.80, .max = 1.0 }, // Very hot + .humidity = .{ .min = 0.0, .max = 0.20 }, // Very dry + .elevation = .{ .min = 0.35, .max = 0.60 }, + .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland + .ruggedness = .{ .min = 0.0, .max = 0.35 }, + .max_height = 90, + .max_slope = 4, + .priority = 6, + .surface = .{ .top = .sand, .filler = .sand, .depth_range = 6 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0, .cactus_density = 0.015, .dead_bush_density = 0.02 }, + .terrain = .{ .height_amplitude = 0.5, .smoothing = 0.4 }, + .colors = .{ .grass = .{ 0.75, 0.70, 0.35 } }, + }, + .{ + .id = .swamp, + .name = "Swamp", + .temperature = .{ .min = 0.50, .max = 0.80 }, + .humidity = .{ .min = 0.70, .max = 1.0 }, + .elevation = .{ .min = 0.28, .max = 0.40 }, + .continentalness = .{ .min = 0.55, .max = 0.75 }, // Coastal to mid-inland + .ruggedness = .{ .min = 0.0, .max = 0.30 }, + .max_slope = 3, + .priority = 5, + .surface = .{ .top = .grass, .filler = .dirt, 
.depth_range = 2 }, + .vegetation = .{ .tree_types = &.{.swamp_oak}, .tree_density = 0.08 }, + .terrain = .{ .clamp_to_sea_level = true, .height_offset = -2 }, + .colors = .{ + .grass = .{ 0.35, 0.45, 0.25 }, + .foliage = .{ 0.30, 0.40, 0.20 }, + .water = .{ 0.25, 0.35, 0.30 }, + }, + }, + .{ + .id = .snow_tundra, + .name = "Snow Tundra", + .temperature = .{ .min = 0.0, .max = 0.25 }, + .humidity = Range.any(), + .elevation = .{ .min = 0.30, .max = 0.70 }, + .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland + .min_height = 70, + .max_slope = 255, + .priority = 4, + .surface = .{ .top = .snow_block, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.spruce}, .tree_density = 0.01 }, + .colors = .{ .grass = .{ 0.7, 0.75, 0.8 } }, + }, + + // === Mountain Biomes (continentalness > 0.75) === + .{ + .id = .mountains, + .name = "Mountains", + .temperature = .{ .min = 0.25, .max = 1.0 }, + .humidity = Range.any(), + .elevation = .{ .min = 0.58, .max = 1.0 }, + .continentalness = .{ .min = 0.75, .max = 1.0 }, // Must be inland high or core + .ruggedness = .{ .min = 0.60, .max = 1.0 }, + .min_height = 90, + .min_ridge_mask = 0.1, + .priority = 2, + .surface = .{ .top = .stone, .filler = .stone, .depth_range = 1 }, + .vegetation = .{ .tree_types = &.{.sparse_oak}, .tree_density = 0 }, + .terrain = .{ .height_amplitude = 1.5 }, + }, + .{ + .id = .snowy_mountains, + .name = "Snowy Mountains", + .temperature = .{ .min = 0.0, .max = 0.35 }, + .humidity = Range.any(), + .elevation = .{ .min = 0.58, .max = 1.0 }, + .continentalness = .{ .min = 0.75, .max = 1.0 }, + .ruggedness = .{ .min = 0.55, .max = 1.0 }, + .min_height = 110, + .max_slope = 255, + .priority = 2, + .surface = .{ .top = .snow_block, .filler = .stone, .depth_range = 1 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, + .terrain = .{ .height_amplitude = 1.4 }, + .colors = .{ .grass = .{ 0.85, 0.90, 0.95 } }, + }, + + // === Special Biomes === + .{ + .id = .mangrove_swamp, 
+ .name = "Mangrove Swamp", + .temperature = .{ .min = 0.7, .max = 0.9 }, + .humidity = .{ .min = 0.8, .max = 1.0 }, + .elevation = .{ .min = 0.2, .max = 0.4 }, + .continentalness = .{ .min = 0.45, .max = 0.60 }, // Coastal swamp + .priority = 6, + .surface = .{ .top = .mud, .filler = .mud, .depth_range = 4 }, + .vegetation = .{ .tree_types = &.{.mangrove}, .tree_density = 0.15 }, + .terrain = .{ .clamp_to_sea_level = true, .height_offset = -1 }, + .colors = .{ .grass = .{ 0.4, 0.5, 0.2 }, .foliage = .{ 0.4, 0.5, 0.2 }, .water = .{ 0.2, 0.4, 0.3 } }, + }, + .{ + .id = .jungle, + .name = "Jungle", + .temperature = .{ .min = 0.75, .max = 1.0 }, + .humidity = .{ .min = 0.7, .max = 1.0 }, + .elevation = .{ .min = 0.30, .max = 0.75 }, + .continentalness = .{ .min = 0.60, .max = 1.0 }, // Inland + .priority = 5, + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.jungle}, .tree_density = 0.20, .bamboo_density = 0.08, .melon_density = 0.04 }, + .colors = .{ .grass = .{ 0.2, 0.8, 0.1 }, .foliage = .{ 0.1, 0.7, 0.1 } }, + }, + .{ + .id = .savanna, + .name = "Savanna", + .temperature = .{ .min = 0.65, .max = 1.0 }, // Hot climates + .humidity = .{ .min = 0.20, .max = 0.50 }, // Wider range - moderately dry + .elevation = .{ .min = 0.30, .max = 0.65 }, + .continentalness = .{ .min = 0.55, .max = 1.0 }, // Inland (less restrictive) + .priority = 5, // Higher priority to win over plains in hot zones + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.acacia}, .tree_density = 0.015, .grass_density = 0.5, .dead_bush_density = 0.01 }, + .colors = .{ .grass = .{ 0.55, 0.55, 0.30 }, .foliage = .{ 0.50, 0.50, 0.28 } }, + }, + .{ + .id = .badlands, + .name = "Badlands", + .temperature = .{ .min = 0.7, .max = 1.0 }, + .humidity = .{ .min = 0.0, .max = 0.3 }, + .elevation = .{ .min = 0.4, .max = 0.8 }, + .continentalness = .{ .min = 0.70, .max = 1.0 }, // Deep inland + .ruggedness = 
.{ .min = 0.4, .max = 1.0 }, + .priority = 6, + .surface = .{ .top = .red_sand, .filler = .terracotta, .depth_range = 5 }, + .vegetation = .{ .cactus_density = 0.02 }, + .colors = .{ .grass = .{ 0.5, 0.4, 0.3 } }, + }, + .{ + .id = .mushroom_fields, + .name = "Mushroom Fields", + .temperature = .{ .min = 0.4, .max = 0.7 }, + .humidity = .{ .min = 0.7, .max = 1.0 }, + .continentalness = .{ .min = 0.0, .max = 0.15 }, // Deep ocean islands only + .max_height = 50, + .priority = 20, + .surface = .{ .top = .mycelium, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{ .huge_red_mushroom, .huge_brown_mushroom }, .tree_density = 0.05, .red_mushroom_density = 0.1, .brown_mushroom_density = 0.1 }, + .colors = .{ .grass = .{ 0.4, 0.8, 0.4 } }, + }, + .{ + .id = .river, + .name = "River", + .temperature = Range.any(), + .humidity = Range.any(), + .elevation = .{ .min = 0.0, .max = 0.35 }, + // River should NEVER win normal biome scoring - impossible range + .continentalness = .{ .min = -1.0, .max = -0.5 }, + .priority = 15, + .surface = .{ .top = .sand, .filler = .sand, .depth_range = 2 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0 }, + }, + + // === Transition Micro-Biomes === + // These should NEVER win natural climate selection. + // They are ONLY injected by edge detection (Issue #102). + // Use impossible continental ranges so they can't match naturally. 
+ .{ + .id = .foothills, + .name = "Foothills", + .temperature = .{ .min = 0.20, .max = 0.90 }, + .humidity = Range.any(), + .elevation = .{ .min = 0.25, .max = 0.65 }, + .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only + .ruggedness = .{ .min = 0.30, .max = 0.80 }, + .priority = 0, // Lowest priority + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{ .sparse_oak, .spruce }, .tree_density = 0.08, .grass_density = 0.4 }, + .terrain = .{ .height_amplitude = 1.1, .smoothing = 0.1 }, + .colors = .{ .grass = .{ 0.35, 0.60, 0.25 } }, + }, + .{ + .id = .marsh, + .name = "Marsh", + .temperature = .{ .min = 0.40, .max = 0.75 }, + .humidity = .{ .min = 0.55, .max = 0.80 }, + .elevation = .{ .min = 0.28, .max = 0.42 }, + .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only + .ruggedness = .{ .min = 0.0, .max = 0.30 }, + .priority = 0, // Lowest priority + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 2 }, + .vegetation = .{ .tree_types = &.{.swamp_oak}, .tree_density = 0.04, .grass_density = 0.5 }, + .terrain = .{ .height_offset = -1, .smoothing = 0.3 }, + .colors = .{ + .grass = .{ 0.30, 0.50, 0.22 }, + .foliage = .{ 0.25, 0.45, 0.18 }, + .water = .{ 0.22, 0.38, 0.35 }, + }, + }, + .{ + .id = .dry_plains, + .name = "Dry Plains", + .temperature = .{ .min = 0.60, .max = 0.85 }, + .humidity = .{ .min = 0.20, .max = 0.40 }, + .elevation = .{ .min = 0.32, .max = 0.58 }, + .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only + .ruggedness = .{ .min = 0.0, .max = 0.40 }, + .priority = 0, // Lowest priority + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{.acacia}, .tree_density = 0.005, .grass_density = 0.3, .dead_bush_density = 0.02 }, + .terrain = .{ .height_amplitude = 0.6, .smoothing = 0.25 }, + .colors = .{ .grass = .{ 0.55, 0.50, 0.28 } }, // Less yellow, 
more natural + }, + .{ + .id = .coastal_plains, + .name = "Coastal Plains", + .temperature = .{ .min = 0.30, .max = 0.80 }, + .humidity = .{ .min = 0.30, .max = 0.70 }, + .elevation = .{ .min = 0.28, .max = 0.45 }, + .continentalness = .{ .min = -1.0, .max = -0.5 }, // IMPOSSIBLE: edge-injection only + .ruggedness = .{ .min = 0.0, .max = 0.35 }, + .priority = 0, // Lowest priority + .surface = .{ .top = .grass, .filler = .dirt, .depth_range = 3 }, + .vegetation = .{ .tree_types = &.{}, .tree_density = 0, .grass_density = 0.4 }, // No trees + .terrain = .{ .height_amplitude = 0.5, .smoothing = 0.3 }, + .colors = .{ .grass = .{ 0.35, 0.60, 0.28 } }, + }, +}; + +/// Comptime-generated lookup table for O(1) BiomeDefinition access by BiomeId. +const BIOME_LOOKUP: [21]*const BiomeDefinition = blk: { + var table: [21]*const BiomeDefinition = undefined; + var filled = [_]bool{false} ** 21; + for (BIOME_REGISTRY) |*def| { + const idx = @intFromEnum(def.id); + table[idx] = def; + filled[idx] = true; + } + // Verify every BiomeId has a definition + for (0..21) |i| { + if (!filled[i]) { + @compileError("BIOME_REGISTRY is missing a BiomeDefinition entry"); + } + } + break :blk table; +}; + +/// Get the BiomeDefinition for a given BiomeId (O(1) comptime lookup). +pub fn getBiomeDefinition(id: BiomeId) *const BiomeDefinition { + return BIOME_LOOKUP[@intFromEnum(id)]; +} diff --git a/src/world/worldgen/biome_selector.zig b/src/world/worldgen/biome_selector.zig new file mode 100644 index 00000000..8bffa82f --- /dev/null +++ b/src/world/worldgen/biome_selector.zig @@ -0,0 +1,271 @@ +//! Biome selection algorithms: Voronoi, score-based, blended, and LOD-simplified. +//! All selection functions are pure — they read from the registry but have no side effects. 
+ +const std = @import("std"); +const registry = @import("biome_registry.zig"); + +const BiomeId = registry.BiomeId; +const ClimateParams = registry.ClimateParams; +const StructuralParams = registry.StructuralParams; +const BIOME_REGISTRY = registry.BIOME_REGISTRY; +const BIOME_POINTS = registry.BIOME_POINTS; +const BLEND_EPSILON = registry.BLEND_EPSILON; + +// ============================================================================ +// Voronoi Biome Selection (Issue #106) +// ============================================================================ + +/// Select biome using Voronoi diagram in heat/humidity space +/// Returns the biome whose point is closest to the given heat/humidity values +pub fn selectBiomeVoronoi(heat: f32, humidity: f32, height: i32, continentalness: f32, slope: i32) BiomeId { + var min_dist: f32 = std.math.inf(f32); + var closest: BiomeId = .plains; + + for (BIOME_POINTS) |point| { + // Check height constraint + if (height < point.y_min or height > point.y_max) continue; + + // Check slope constraint + if (slope > point.max_slope) continue; + + // Check continentalness constraint + if (continentalness < point.min_continental or continentalness > point.max_continental) continue; + + // Calculate weighted Euclidean distance in heat/humidity space + const d_heat = heat - point.heat; + const d_humidity = humidity - point.humidity; + var dist = @sqrt(d_heat * d_heat + d_humidity * d_humidity); + + // Weight adjusts effective cell size (larger weight = closer distance = more likely) + dist /= point.weight; + + if (dist < min_dist) { + min_dist = dist; + closest = point.id; + } + } + + return closest; +} + +/// Select biome using Voronoi with river override +pub fn selectBiomeVoronoiWithRiver( + heat: f32, + humidity: f32, + height: i32, + continentalness: f32, + slope: i32, + river_mask: f32, +) BiomeId { + // River biome takes priority when river mask is active + // Issue #110: Allow rivers at higher elevations (canyons) + if (river_mask > 
0.5 and height < 120) { + return .river; + } + return selectBiomeVoronoi(heat, humidity, height, continentalness, slope); +} + +// ============================================================================ +// Score-based Biome Selection +// ============================================================================ + +/// Select the best matching biome for given climate parameters +pub fn selectBiome(params: ClimateParams) BiomeId { + var best_score: f32 = 0; + var best_biome: BiomeId = .plains; // Default fallback + + for (BIOME_REGISTRY) |biome| { + const s = biome.scoreClimate(params); + if (s > best_score) { + best_score = s; + best_biome = biome.id; + } + } + + return best_biome; +} + +/// Select biome with river override +pub fn selectBiomeWithRiver(params: ClimateParams, river_mask: f32) BiomeId { + // River biome takes priority when river mask is active + if (river_mask > 0.5 and params.elevation < 0.35) { + return .river; + } + return selectBiome(params); +} + +/// Compute ClimateParams from raw generator values +pub fn computeClimateParams( + temperature: f32, + humidity: f32, + height: i32, + continentalness: f32, + erosion: f32, + sea_level: i32, + max_height: i32, +) ClimateParams { + // Normalize elevation: 0 = below sea, 0.3 = sea level, 1.0 = max height + // Use conditional to avoid integer overflow when height < sea_level + const height_above_sea: i32 = if (height > sea_level) height - sea_level else 0; + const elevation_range = max_height - sea_level; + const elevation = if (elevation_range > 0) + 0.3 + 0.7 * @as(f32, @floatFromInt(height_above_sea)) / @as(f32, @floatFromInt(elevation_range)) + else + 0.3; + + // For underwater: scale 0-0.3 + const final_elevation = if (height < sea_level) + 0.3 * @as(f32, @floatFromInt(@max(0, height))) / @as(f32, @floatFromInt(sea_level)) + else + elevation; + + return .{ + .temperature = temperature, + .humidity = humidity, + .elevation = @min(1.0, final_elevation), + .continentalness = continentalness, + 
.ruggedness = 1.0 - erosion, // Invert erosion: low erosion = high ruggedness + }; +} + +// ============================================================================ +// Blended Biome Selection +// ============================================================================ + +/// Result of blended biome selection +pub const BiomeSelection = struct { + primary: BiomeId, + secondary: BiomeId, + blend_factor: f32, // 0.0 = pure primary, up to 0.5 = mix of secondary + primary_score: f32, + secondary_score: f32, +}; + +/// Select top 2 biomes for blending +pub fn selectBiomeBlended(params: ClimateParams) BiomeSelection { + var best_score: f32 = 0.0; + var best_biome: ?BiomeId = null; + var second_score: f32 = 0.0; + var second_biome: ?BiomeId = null; + + for (BIOME_REGISTRY) |biome| { + const s = biome.scoreClimate(params); + if (s > best_score) { + second_score = best_score; + second_biome = best_biome; + best_score = s; + best_biome = biome.id; + } else if (s > second_score) { + second_score = s; + second_biome = biome.id; + } + } + + const primary = best_biome orelse .plains; + const secondary = second_biome orelse primary; + + var blend: f32 = 0.0; + const sum = best_score + second_score; + if (sum > BLEND_EPSILON) { + blend = second_score / sum; + } + + return .{ + .primary = primary, + .secondary = secondary, + .blend_factor = blend, + .primary_score = best_score, + .secondary_score = second_score, + }; +} + +/// Select blended biomes with river override +pub fn selectBiomeWithRiverBlended(params: ClimateParams, river_mask: f32) BiomeSelection { + const selection = selectBiomeBlended(params); + + // If distinctly river, override primary with blending + if (params.elevation < 0.35) { + const river_edge0 = 0.45; + const river_edge1 = 0.55; + + if (river_mask > river_edge0) { + const t = std.math.clamp((river_mask - river_edge0) / (river_edge1 - river_edge0), 0.0, 1.0); + const river_factor = t * t * (3.0 - 2.0 * t); + + // Blend towards river: + // river_factor 
= 1.0 -> Pure River + // river_factor = 0.0 -> Pure Land (selection.primary) + // We set Primary=River, Secondary=Land, Blend=(1-river_factor) + return .{ + .primary = .river, + .secondary = selection.primary, + .blend_factor = 1.0 - river_factor, + .primary_score = 1.0, // River wins + .secondary_score = selection.primary_score, + }; + } + } + return selection; +} + +// ============================================================================ +// Constraint-based Selection (Voronoi + structural filtering) +// ============================================================================ + +/// Select biome using Voronoi diagram in heat/humidity space (Issue #106) +/// Climate temperature/humidity are converted to heat/humidity scale (0-100) +/// Structural constraints (height, continentalness) filter eligible biomes +pub fn selectBiomeWithConstraints(climate: ClimateParams, structural: StructuralParams) BiomeId { + // Convert climate params to Voronoi heat/humidity scale (0-100) + // Temperature 0-1 -> Heat 0-100 + // Humidity 0-1 -> Humidity 0-100 + const heat = climate.temperature * 100.0; + const humidity = climate.humidity * 100.0; + + return selectBiomeVoronoi(heat, humidity, structural.height, structural.continentalness, structural.slope); +} + +/// Select biome with structural constraints and river override +pub fn selectBiomeWithConstraintsAndRiver(climate: ClimateParams, structural: StructuralParams, river_mask: f32) BiomeId { + // Convert climate params to Voronoi heat/humidity scale (0-100) + const heat = climate.temperature * 100.0; + const humidity = climate.humidity * 100.0; + + return selectBiomeVoronoiWithRiver(heat, humidity, structural.height, structural.continentalness, structural.slope, river_mask); +} + +// ============================================================================ +// LOD-optimized Biome Functions (Issue #114) +// ============================================================================ + +/// Simplified biome selection 
for LOD2+ (no structural constraints). +/// +/// Intentionally excludes transition micro-biomes (foothills, marsh, dry_plains, +/// coastal_plains), special biomes (mushroom_fields, mangrove_swamp), beach, +/// and mountain variants. These are either rare, narrow-band, or structurally +/// dependent biomes that don't significantly affect distant terrain silhouette. +/// The full Voronoi selection handles them when chunks enter LOD0/LOD1 range. +pub fn selectBiomeSimple(climate: ClimateParams) BiomeId { + const heat = climate.temperature * 100.0; + const humidity = climate.humidity * 100.0; + const continental = climate.continentalness; + + // Ocean check + if (continental < 0.35) { + if (continental < 0.20) return .deep_ocean; + return .ocean; + } + + // Simple land biome selection based on heat/humidity + if (heat < 20) { + return if (humidity > 50) .taiga else .snow_tundra; + } else if (heat < 40) { + return if (humidity > 60) .taiga else .plains; + } else if (heat < 60) { + return if (humidity > 70) .forest else .plains; + } else if (heat < 80) { + return if (humidity > 60) .jungle else if (humidity > 30) .savanna else .desert; + } else { + return if (humidity > 40) .badlands else .desert; + } +} diff --git a/src/world/worldgen/biome_source.zig b/src/world/worldgen/biome_source.zig new file mode 100644 index 00000000..a0d43994 --- /dev/null +++ b/src/world/worldgen/biome_source.zig @@ -0,0 +1,157 @@ +//! BiomeSource - Unified biome selection interface (Issue #147). +//! Orchestrates the registry, selector, edge detector, and color provider modules. 
+ +const registry = @import("biome_registry.zig"); +const selector = @import("biome_selector.zig"); +const edge_detector = @import("biome_edge_detector.zig"); +const color_provider = @import("biome_color_provider.zig"); + +const BiomeId = registry.BiomeId; +const BiomeDefinition = registry.BiomeDefinition; +const ClimateParams = registry.ClimateParams; +const StructuralParams = registry.StructuralParams; +const BiomeEdgeInfo = edge_detector.BiomeEdgeInfo; + +/// Result of biome selection with blending information +pub const BiomeResult = struct { + primary: BiomeId, + secondary: BiomeId, // For blending (may be same as primary) + blend_factor: f32, // 0.0 = use primary, 1.0 = use secondary +}; + +/// Parameters for BiomeSource initialization +pub const BiomeSourceParams = struct { + sea_level: i32 = 64, + edge_detection_enabled: bool = true, + ocean_threshold: f32 = 0.35, +}; + +/// Unified biome selection interface. +/// +/// BiomeSource wraps all biome selection logic into a single, configurable +/// interface. This allows swapping biome selection behavior for different +/// dimensions (e.g., Overworld vs Nether) without modifying the generator. +/// +/// Part of Issue #147: Modularize Terrain Generation Pipeline +pub const BiomeSource = struct { + params: BiomeSourceParams, + + /// Initialize with default parameters + pub fn init() BiomeSource { + return initWithParams(.{}); + } + + /// Initialize with custom parameters + pub fn initWithParams(params: BiomeSourceParams) BiomeSource { + return .{ .params = params }; + } + + /// Primary biome selection interface. + /// + /// Selects a biome based on climate and structural parameters, + /// with optional river override. + /// + /// Note: `self` is retained (rather than making this a namespace function) + /// so that BiomeSource remains a consistent instance-based interface. + /// Future dimension support (e.g., Nether) may use `self.params` here. 
+ pub fn selectBiome( + self: *const BiomeSource, + climate: ClimateParams, + structural: StructuralParams, + river_mask: f32, + ) BiomeId { + _ = self; + return selector.selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); + } + + /// Select biome with edge detection and transition biome injection. + /// + /// This is the full biome selection that includes checking for + /// biome boundaries and inserting appropriate transition biomes. + pub fn selectBiomeWithEdge( + self: *const BiomeSource, + climate: ClimateParams, + structural: StructuralParams, + river_mask: f32, + edge_info: BiomeEdgeInfo, + ) BiomeResult { + // First, get the base biome + const base_biome = self.selectBiome(climate, structural, river_mask); + + // If edge detection is disabled or no edge detected, return base + if (!self.params.edge_detection_enabled or edge_info.edge_band == .none) { + return .{ + .primary = base_biome, + .secondary = base_biome, + .blend_factor = 0.0, + }; + } + + // Check if transition is needed + if (edge_info.neighbor_biome) |neighbor| { + if (edge_detector.getTransitionBiome(base_biome, neighbor)) |transition| { + // Set blend factor based on edge band + const blend: f32 = switch (edge_info.edge_band) { + .inner => 0.3, // Closer to boundary: more original showing through + .middle => 0.2, + .outer => 0.1, + .none => 0.0, + }; + return .{ + .primary = transition, + .secondary = base_biome, + .blend_factor = blend, + }; + } + } + + // No transition needed + return .{ + .primary = base_biome, + .secondary = base_biome, + .blend_factor = 0.0, + }; + } + + /// Simplified biome selection for LOD levels + pub fn selectBiomeSimplified(self: *const BiomeSource, climate: ClimateParams) BiomeId { + _ = self; + return selector.selectBiomeSimple(climate); + } + + /// Check if a position is ocean based on continentalness + pub fn isOcean(self: *const BiomeSource, continentalness: f32) bool { + return continentalness < self.params.ocean_threshold; + } + + /// Get 
the biome definition for a biome ID + pub fn getDefinition(_: *const BiomeSource, biome_id: BiomeId) BiomeDefinition { + return registry.getBiomeDefinition(biome_id).*; + } + + /// Get biome color for rendering + pub fn getColor(_: *const BiomeSource, biome_id: BiomeId) u32 { + return color_provider.getBiomeColor(biome_id); + } + + /// Compute climate parameters from raw values + pub fn computeClimate( + self: *const BiomeSource, + temperature: f32, + humidity: f32, + terrain_height: i32, + continentalness: f32, + erosion: f32, + max_height: i32, + ) ClimateParams { + return selector.computeClimateParams( + temperature, + humidity, + terrain_height, + continentalness, + erosion, + self.params.sea_level, + max_height, + ); + } +}; diff --git a/src/world/worldgen/coastal_generator.zig b/src/world/worldgen/coastal_generator.zig new file mode 100644 index 00000000..0bc996b8 --- /dev/null +++ b/src/world/worldgen/coastal_generator.zig @@ -0,0 +1,46 @@ +const noise_mod = @import("noise.zig"); +const clamp01 = noise_mod.clamp01; +const noise_sampler_mod = @import("noise_sampler.zig"); +const NoiseSampler = noise_sampler_mod.NoiseSampler; +const surface_builder_mod = @import("surface_builder.zig"); +const SurfaceBuilder = surface_builder_mod.SurfaceBuilder; +const CoastalSurfaceType = surface_builder_mod.CoastalSurfaceType; + +/// Coastal classifier and ocean/inland water helper. 
+pub const CoastalGenerator = struct { + ocean_threshold: f32, + + pub fn init(ocean_threshold: f32) CoastalGenerator { + return .{ .ocean_threshold = ocean_threshold }; + } + + pub fn getSurfaceType( + surface_builder: *const SurfaceBuilder, + continentalness: f32, + slope: i32, + height: i32, + erosion: f32, + ) CoastalSurfaceType { + return surface_builder.getCoastalSurfaceType(continentalness, slope, height, erosion); + } + + pub fn isOceanWater(self: *const CoastalGenerator, noise_sampler: *const NoiseSampler, wx: f32, wz: f32) bool { + const warp = noise_sampler.computeWarp(wx, wz, 0); + const xw = wx + warp.x; + const zw = wz + warp.z; + const c = noise_sampler.getContinentalness(xw, zw, 0); + return c < self.ocean_threshold; + } + + pub fn isInlandWater(self: *const CoastalGenerator, noise_sampler: *const NoiseSampler, wx: f32, wz: f32, height: i32, sea_level: i32) bool { + const warp = noise_sampler.computeWarp(wx, wz, 0); + const xw = wx + warp.x; + const zw = wz + warp.z; + const c = noise_sampler.getContinentalness(xw, zw, 0); + return height < sea_level and c >= self.ocean_threshold; + } + + pub fn applyCoastJitter(base_continentalness: f32, coast_jitter: f32) f32 { + return clamp01(base_continentalness + coast_jitter); + } +}; diff --git a/src/world/worldgen/decoration_provider.zig b/src/world/worldgen/decoration_provider.zig index 92aed887..3e8e243d 100644 --- a/src/world/worldgen/decoration_provider.zig +++ b/src/world/worldgen/decoration_provider.zig @@ -12,24 +12,7 @@ pub const DecorationProvider = struct { ptr: ?*anyopaque, vtable: *const VTable, - pub const VTable = struct { - decorate: *const fn ( - ptr: ?*anyopaque, - chunk: *Chunk, - local_x: u32, - local_z: u32, - surface_y: i32, - surface_block: BlockType, - biome: BiomeId, - variant: f32, - allow_subbiomes: bool, - veg_mult: f32, - random: std.Random, - ) void, - }; - - pub fn decorate( - self: DecorationProvider, + pub const DecorationContext = struct { chunk: *Chunk, local_x: u32, 
local_z: u32, @@ -40,19 +23,16 @@ pub const DecorationProvider = struct { allow_subbiomes: bool, veg_mult: f32, random: std.Random, - ) void { - self.vtable.decorate( - self.ptr, - chunk, - local_x, - local_z, - surface_y, - surface_block, - biome, - variant, - allow_subbiomes, - veg_mult, - random, - ); + }; + + pub const VTable = struct { + decorate: *const fn ( + ptr: ?*anyopaque, + ctx: DecorationContext, + ) void, + }; + + pub fn decorate(self: DecorationProvider, ctx: DecorationContext) void { + self.vtable.decorate(self.ptr, ctx); } }; diff --git a/src/world/worldgen/decoration_registry.zig b/src/world/worldgen/decoration_registry.zig index f7dff6ac..6ea17af3 100644 --- a/src/world/worldgen/decoration_registry.zig +++ b/src/world/worldgen/decoration_registry.zig @@ -20,6 +20,7 @@ pub const Schematic = types.Schematic; pub const SchematicDecoration = types.SchematicDecoration; pub const Decoration = types.Decoration; pub const DecorationProvider = @import("decoration_provider.zig").DecorationProvider; +pub const DecorationContext = @import("decoration_provider.zig").DecorationProvider.DecorationContext; pub const DECORATIONS = [_]Decoration{ // === Grass === @@ -129,20 +130,18 @@ pub const StandardDecorationProvider = struct { return true; } - fn decorate( - ptr: ?*anyopaque, - chunk: *Chunk, - local_x: u32, - local_z: u32, - surface_y: i32, - surface_block: BlockType, - biome: BiomeId, - variant: f32, - allow_subbiomes: bool, - veg_mult: f32, - random: std.Random, - ) void { + fn decorate(ptr: ?*anyopaque, ctx: DecorationContext) void { _ = ptr; + const chunk = ctx.chunk; + const local_x = ctx.local_x; + const local_z = ctx.local_z; + const surface_y = ctx.surface_y; + const surface_block = ctx.surface_block; + const biome = ctx.biome; + const variant = ctx.variant; + const allow_subbiomes = ctx.allow_subbiomes; + const veg_mult = ctx.veg_mult; + const random = ctx.random; // 1. 
Static decorations (flowers, grass) for (DECORATIONS) |deco| { diff --git a/src/world/worldgen/lighting_computer.zig b/src/world/worldgen/lighting_computer.zig new file mode 100644 index 00000000..13d709a8 --- /dev/null +++ b/src/world/worldgen/lighting_computer.zig @@ -0,0 +1,107 @@ +const std = @import("std"); +const Chunk = @import("../chunk.zig").Chunk; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; +const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const MAX_LIGHT = @import("../chunk.zig").MAX_LIGHT; +const block_registry = @import("../block_registry.zig"); + +pub const LightingComputer = struct { + const LightNode = struct { + x: u8, + y: u16, + z: u8, + r: u4, + g: u4, + b: u4, + }; + + pub fn computeSkylight(chunk: *Chunk) void { + var local_z: u32 = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + var sky_light: u4 = MAX_LIGHT; + var y: i32 = CHUNK_SIZE_Y - 1; + while (y >= 0) : (y -= 1) { + const uy: u32 = @intCast(y); + const block = chunk.getBlock(local_x, uy, local_z); + chunk.setSkyLight(local_x, uy, local_z, sky_light); + if (block_registry.getBlockDefinition(block).isOpaque()) { + sky_light = 0; + } else if (block == .water and sky_light > 0) { + sky_light -= 1; + } + } + } + } + } + + pub fn computeBlockLight(chunk: *Chunk, allocator: std.mem.Allocator) !void { + var queue = std.ArrayListUnmanaged(LightNode){}; + defer queue.deinit(allocator); + var local_z: u32 = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + var y: u32 = 0; + while (y < CHUNK_SIZE_Y) : (y += 1) { + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const block = chunk.getBlock(local_x, y, local_z); + const emission = block_registry.getBlockDefinition(block).light_emission; + if (emission[0] > 0 or emission[1] > 0 or emission[2] > 0) { + chunk.setBlockLightRGB(local_x, y, local_z, 
emission[0], emission[1], emission[2]); + try queue.append(allocator, .{ + .x = @intCast(local_x), + .y = @intCast(y), + .z = @intCast(local_z), + .r = emission[0], + .g = emission[1], + .b = emission[2], + }); + } + } + } + } + var head: usize = 0; + while (head < queue.items.len) : (head += 1) { + const node = queue.items[head]; + const neighbors = [6][3]i32{ .{ 1, 0, 0 }, .{ -1, 0, 0 }, .{ 0, 1, 0 }, .{ 0, -1, 0 }, .{ 0, 0, 1 }, .{ 0, 0, -1 } }; + for (neighbors) |offset| { + const nx = @as(i32, node.x) + offset[0]; + const ny = @as(i32, node.y) + offset[1]; + const nz = @as(i32, node.z) + offset[2]; + if (nx >= 0 and nx < CHUNK_SIZE_X and ny >= 0 and ny < CHUNK_SIZE_Y and nz >= 0 and nz < CHUNK_SIZE_Z) { + const ux: u32 = @intCast(nx); + const uy: u32 = @intCast(ny); + const uz: u32 = @intCast(nz); + const block = chunk.getBlock(ux, uy, uz); + if (!block_registry.getBlockDefinition(block).isOpaque()) { + const current_light = chunk.getLight(ux, uy, uz); + const current_r = current_light.getBlockLightR(); + const current_g = current_light.getBlockLightG(); + const current_b = current_light.getBlockLightB(); + + const next_r: u4 = if (node.r > 1) node.r - 1 else 0; + const next_g: u4 = if (node.g > 1) node.g - 1 else 0; + const next_b: u4 = if (node.b > 1) node.b - 1 else 0; + + if (next_r > current_r or next_g > current_g or next_b > current_b) { + const new_r = @max(next_r, current_r); + const new_g = @max(next_g, current_g); + const new_b = @max(next_b, current_b); + chunk.setBlockLightRGB(ux, uy, uz, new_r, new_g, new_b); + try queue.append(allocator, .{ + .x = @intCast(nx), + .y = @intCast(ny), + .z = @intCast(nz), + .r = new_r, + .g = new_g, + .b = new_b, + }); + } + } + } + } + } + } +}; diff --git a/src/world/worldgen/overworld_generator.zig b/src/world/worldgen/overworld_generator.zig index 63c9fa2f..affe5b1b 100644 --- a/src/world/worldgen/overworld_generator.zig +++ b/src/world/worldgen/overworld_generator.zig @@ -1,49 +1,12 @@ -//! 
Terrain generator using Luanti-style phased pipeline per worldgen-luanti-style.md -//! Phase A: Terrain Shape (stone + water only, biome-agnostic) -//! Phase B: Biome Calculation (climate space, weights) -//! Phase C: Surface Dusting (top/filler replacement) -//! Phase D: Cave Carving -//! Phase E: Decorations and Features -//! -//! LOD Support (Issue #114): -//! - LOD0: Full generation (all phases) -//! - LOD1: Skip worm caves, reduced decoration density -//! - LOD2: Skip all caves, skip decorations, simplified noise -//! - LOD3: Heightmap-only, no 3D data -//! -//! Modular Architecture (Issue #147): -//! The generator now delegates to specialized subsystems: -//! - NoiseSampler: Pure noise generation -//! - HeightSampler: Terrain height computation -//! - SurfaceBuilder: Surface block placement -//! - BiomeSource: Biome selection (in biome.zig) +//! Terrain generator orchestrator for Luanti-style phased worldgen. +//! Phase responsibilities are delegated to dedicated subsystems. const std = @import("std"); -const noise_mod = @import("noise.zig"); -const Noise = noise_mod.Noise; -const smoothstep = noise_mod.smoothstep; -const clamp01 = noise_mod.clamp01; -const ConfiguredNoise = noise_mod.ConfiguredNoise; -const NoiseParams = noise_mod.NoiseParams; -const Vec3f = noise_mod.Vec3f; -const CaveSystem = @import("caves.zig").CaveSystem; -const DecorationProvider = @import("decoration_provider.zig").DecorationProvider; const biome_mod = @import("biome.zig"); const BiomeId = biome_mod.BiomeId; -const BiomeSource = biome_mod.BiomeSource; const region_pkg = @import("region.zig"); -const RegionSystem = region_pkg.RegionSystem; const RegionInfo = region_pkg.RegionInfo; const RegionMood = region_pkg.RegionMood; -const BiomeDefinition = biome_mod.BiomeDefinition; -const ClimateParams = biome_mod.ClimateParams; -const gen_region = @import("gen_region.zig"); -const GenRegion = gen_region.GenRegion; -const GenRegionCache = gen_region.GenRegionCache; -const ClassificationCache = 
gen_region.ClassificationCache; -const ClassCell = gen_region.ClassCell; -const REGION_SIZE_X = gen_region.REGION_SIZE_X; -const REGION_SIZE_Z = gen_region.REGION_SIZE_Z; const world_class = @import("world_class.zig"); const ContinentalZone = world_class.ContinentalZone; const SurfaceType = world_class.SurfaceType; @@ -51,121 +14,28 @@ const Chunk = @import("../chunk.zig").Chunk; const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; const CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; -const MAX_LIGHT = @import("../chunk.zig").MAX_LIGHT; const BlockType = @import("../block.zig").BlockType; -const block_registry = @import("../block_registry.zig"); -const Biome = @import("../block.zig").Biome; const lod_chunk = @import("../lod_chunk.zig"); const LODLevel = lod_chunk.LODLevel; const LODSimplifiedData = lod_chunk.LODSimplifiedData; - -// Issue #147: Import modular subsystems -const noise_sampler_mod = @import("noise_sampler.zig"); -pub const NoiseSampler = noise_sampler_mod.NoiseSampler; -const height_sampler_mod = @import("height_sampler.zig"); -pub const HeightSampler = height_sampler_mod.HeightSampler; -const surface_builder_mod = @import("surface_builder.zig"); -pub const SurfaceBuilder = surface_builder_mod.SurfaceBuilder; -pub const CoastalSurfaceType = surface_builder_mod.CoastalSurfaceType; - +const DecorationProvider = @import("decoration_provider.zig").DecorationProvider; +const gen_region = @import("gen_region.zig"); +const ClassificationCache = gen_region.ClassificationCache; const gen_interface = @import("generator_interface.zig"); const Generator = gen_interface.Generator; const GeneratorInfo = gen_interface.GeneratorInfo; -const GenerationOptions = gen_interface.GenerationOptions; const ColumnInfo = gen_interface.ColumnInfo; - -// ============================================================================ -// Luanti V7-Style Noise Parameters (Issue #105) -// These define the 
multi-layer terrain generation system -// ============================================================================ - -/// Create NoiseParams with a seed offset from base seed -fn makeNoiseParams(base_seed: u64, offset: u64, spread: f32, scale: f32, off: f32, octaves: u16, persist: f32) NoiseParams { - return .{ - .seed = base_seed +% offset, - .spread = Vec3f.uniform(spread), - .scale = scale, - .offset = off, - .octaves = octaves, - .persist = persist, - .lacunarity = 2.0, - .flags = .{}, - }; -} - -// ============================================================================ -// Path System Constants (from region spec) -// ============================================================================ -const VALLEY_DEPTH: f32 = 10.0; -const RIVER_DEPTH: f32 = 15.0; - -/// Terrain generation parameters -const Params = struct { - warp_scale: f32 = 1.0 / 200.0, - warp_amplitude: f32 = 30.0, - continental_scale: f32 = 1.0 / 1500.0, - - // Continental Zones: - ocean_threshold: f32 = 0.35, - continental_deep_ocean_max: f32 = 0.20, - continental_ocean_max: f32 = 0.35, - continental_coast_max: f32 = 0.42, - continental_inland_low_max: f32 = 0.60, - continental_inland_high_max: f32 = 0.75, - - erosion_scale: f32 = 1.0 / 400.0, - peaks_scale: f32 = 1.0 / 300.0, - temperature_macro_scale: f32 = 1.0 / 2000.0, - temperature_local_scale: f32 = 1.0 / 200.0, - humidity_macro_scale: f32 = 1.0 / 2000.0, - humidity_local_scale: f32 = 1.0 / 200.0, - climate_macro_weight: f32 = 0.75, - temp_lapse: f32 = 0.25, - sea_level: i32 = 64, - - // Mountains - mount_amp: f32 = 60.0, - mount_cap: f32 = 120.0, - detail_scale: f32 = 1.0 / 32.0, // SMALL - every ~32 blocks - detail_amp: f32 = 6.0, - highland_range: f32 = 80.0, - coast_jitter_scale: f32 = 1.0 / 150.0, - seabed_scale: f32 = 1.0 / 100.0, - seabed_amp: f32 = 2.0, - river_scale: f32 = 1.0 / 800.0, - river_min: f32 = 0.90, - river_max: f32 = 0.95, - river_depth_max: f32 = 6.0, - - // Beach - very narrow - coast_continentalness_min: 
f32 = 0.35, - coast_continentalness_max: f32 = 0.40, - beach_max_height_above_sea: i32 = 3, - beach_max_slope: i32 = 2, - cliff_min_slope: i32 = 5, - gravel_erosion_threshold: f32 = 0.7, - coastal_no_tree_min: i32 = 8, - coastal_no_tree_max: i32 = 18, - - // Mountains - mount_inland_min: f32 = 0.60, - mount_inland_max: f32 = 0.80, - mount_peak_min: f32 = 0.55, - mount_peak_max: f32 = 0.85, - mount_rugged_min: f32 = 0.35, - mount_rugged_max: f32 = 0.75, - - mid_freq_hill_scale: f32 = 1.0 / 64.0, // SMALL - hills every ~64 blocks - mid_freq_hill_amp: f32 = 12.0, - peak_compression_offset: f32 = 80.0, - peak_compression_range: f32 = 80.0, - terrace_step: f32 = 4.0, - ridge_scale: f32 = 1.0 / 400.0, - ridge_amp: f32 = 25.0, - ridge_inland_min: f32 = 0.50, - ridge_inland_max: f32 = 0.70, - ridge_sparsity: f32 = 0.50, -}; +const log = @import("../../engine/core/log.zig"); + +const terrain_shape_mod = @import("terrain_shape_generator.zig"); +const TerrainShapeGenerator = terrain_shape_mod.TerrainShapeGenerator; +const NoiseSampler = terrain_shape_mod.NoiseSampler; +const HeightSampler = terrain_shape_mod.HeightSampler; +const SurfaceBuilder = terrain_shape_mod.SurfaceBuilder; +const CoastalSurfaceType = terrain_shape_mod.CoastalSurfaceType; +const BiomeSource = @import("biome.zig").BiomeSource; +const BiomeDecorator = @import("biome_decorator.zig").BiomeDecorator; +const LightingComputer = @import("lighting_computer.zig").LightingComputer; pub const OverworldGenerator = struct { pub const INFO = GeneratorInfo{ @@ -173,210 +43,95 @@ pub const OverworldGenerator = struct { .description = "Standard terrain with diverse biomes and caves.", }; - // DEPRECATED (Issue #147): These noise fields are retained for backward compatibility. - // New code should use noise_sampler subsystem instead. These will be removed in a future version. - // The noise_sampler contains identical noise generators initialized with the same seed. 
- warp_noise_x: ConfiguredNoise, - warp_noise_z: ConfiguredNoise, - continentalness_noise: ConfiguredNoise, - erosion_noise: ConfiguredNoise, - peaks_noise: ConfiguredNoise, - temperature_noise: ConfiguredNoise, - humidity_noise: ConfiguredNoise, - temperature_local_noise: ConfiguredNoise, - humidity_local_noise: ConfiguredNoise, - detail_noise: ConfiguredNoise, - coast_jitter_noise: ConfiguredNoise, - seabed_noise: ConfiguredNoise, - river_noise: ConfiguredNoise, - beach_exposure_noise: ConfiguredNoise, - cave_system: CaveSystem, - filler_depth_noise: ConfiguredNoise, - mountain_lift_noise: ConfiguredNoise, - ridge_noise: ConfiguredNoise, - params: Params, allocator: std.mem.Allocator, - - // Classification cache for LOD generation (Issue #119) classification_cache: ClassificationCache, - // Last player position for cache recentering cache_center_x: i32, cache_center_z: i32, - - // DEPRECATED (Issue #147): V7-style noises - use noise_sampler instead - terrain_base: ConfiguredNoise, - terrain_alt: ConfiguredNoise, - height_select: ConfiguredNoise, - terrain_persist: ConfiguredNoise, - variant_noise: ConfiguredNoise, - - // Issue #147: Modular subsystems for terrain generation - // These provide clean, testable interfaces to terrain generation components. - // New code should use these subsystems instead of the deprecated noise fields above. - noise_sampler: NoiseSampler, - height_sampler: HeightSampler, - surface_builder: SurfaceBuilder, - biome_source: BiomeSource, - decoration_provider: DecorationProvider, + terrain_shape: TerrainShapeGenerator, + biome_decorator: BiomeDecorator, /// Distance threshold for cache recentering (blocks). - /// When player is this far from cache center, recenter the cache. - /// 512 blocks = 1/4 of cache coverage (2048 blocks), ensures we recenter - /// before reaching the cache edge. 
pub const CACHE_RECENTER_THRESHOLD: i32 = 512; + pub const InitParams = struct { + terrain_shape: terrain_shape_mod.Params = .{}, + }; + pub fn init(seed: u64, allocator: std.mem.Allocator, decoration_provider: DecorationProvider) OverworldGenerator { - const p = Params{}; + return initWithParams(seed, allocator, decoration_provider, .{}); + } + + pub fn initWithParams(seed: u64, allocator: std.mem.Allocator, decoration_provider: DecorationProvider, params: InitParams) OverworldGenerator { return .{ - .warp_noise_x = ConfiguredNoise.init(makeNoiseParams(seed, 10, 200, p.warp_amplitude, 0, 3, 0.5)), - .warp_noise_z = ConfiguredNoise.init(makeNoiseParams(seed, 11, 200, p.warp_amplitude, 0, 3, 0.5)), - .continentalness_noise = ConfiguredNoise.init(makeNoiseParams(seed, 20, 1500, 1.0, 0, 4, 0.5)), - .erosion_noise = ConfiguredNoise.init(makeNoiseParams(seed, 30, 400, 1.0, 0, 4, 0.5)), - .peaks_noise = ConfiguredNoise.init(makeNoiseParams(seed, 40, 300, 1.0, 0, 5, 0.5)), - .temperature_noise = ConfiguredNoise.init(makeNoiseParams(seed, 50, 2000, 1.0, 0, 3, 0.5)), - .humidity_noise = ConfiguredNoise.init(makeNoiseParams(seed, 60, 2000, 1.0, 0, 3, 0.5)), - .temperature_local_noise = ConfiguredNoise.init(makeNoiseParams(seed, 70, 200, 1.0, 0, 3, 0.5)), - .humidity_local_noise = ConfiguredNoise.init(makeNoiseParams(seed, 80, 200, 1.0, 0, 3, 0.5)), - .detail_noise = ConfiguredNoise.init(makeNoiseParams(seed, 90, 32, p.detail_amp, 0, 3, 0.5)), - .coast_jitter_noise = ConfiguredNoise.init(makeNoiseParams(seed, 100, 150, 0.03, 0, 2, 0.5)), - .seabed_noise = ConfiguredNoise.init(makeNoiseParams(seed, 110, 100, p.seabed_amp, 0, 2, 0.5)), - .river_noise = ConfiguredNoise.init(makeNoiseParams(seed, 120, 800, 1.0, 0, 4, 0.5)), - .beach_exposure_noise = ConfiguredNoise.init(makeNoiseParams(seed, 130, 100, 1.0, 0, 3, 0.5)), - .cave_system = CaveSystem.init(seed), - .filler_depth_noise = ConfiguredNoise.init(makeNoiseParams(seed, 140, 64, 1.0, 0, 3, 0.5)), - .mountain_lift_noise = 
ConfiguredNoise.init(makeNoiseParams(seed, 150, 400, 1.0, 0, 3, 0.5)), - .ridge_noise = ConfiguredNoise.init(makeNoiseParams(seed, 160, 400, 1.0, 0, 5, 0.5)), - .params = .{}, .allocator = allocator, .classification_cache = ClassificationCache.init(), .cache_center_x = 0, .cache_center_z = 0, - - // V7-style terrain layers - spread values based on Luanti defaults - // terrain_base: Base terrain shape, rolling hills character - // spread=300 for features every ~300 blocks (was 600 in Luanti, smaller for Minecraft feel) - .terrain_base = ConfiguredNoise.init(makeNoiseParams(seed, 1001, 300, 35, 4, 5, 0.6)), - - // terrain_alt: Alternate terrain shape, flatter character - // Blended with terrain_base using height_select - .terrain_alt = ConfiguredNoise.init(makeNoiseParams(seed, 1002, 300, 20, 4, 5, 0.6)), - - // height_select: Blend factor between base and alt terrain - // Controls where terrain has base vs alt character - .height_select = ConfiguredNoise.init(makeNoiseParams(seed, 1003, 250, 16, -8, 6, 0.6)), - - // terrain_persist: Detail variation multiplier - // Modulates how much fine detail appears in different areas - .terrain_persist = ConfiguredNoise.init(makeNoiseParams(seed, 1004, 1000, 0.15, 0.6, 3, 0.6)), - - // variant_noise: Low-frequency noise for sub-biomes (Issue #110) - // Spread 250 blocks for reasonably sized patches - .variant_noise = ConfiguredNoise.init(makeNoiseParams(seed, 1008, 250, 1.0, 0.0, 3, 0.5)), - - // Issue #147: Initialize modular subsystems - .noise_sampler = NoiseSampler.init(seed), - .height_sampler = HeightSampler.init(), - .surface_builder = SurfaceBuilder.init(), - .biome_source = BiomeSource.init(), - .decoration_provider = decoration_provider, + .terrain_shape = TerrainShapeGenerator.initWithParams(seed, params.terrain_shape), + .biome_decorator = BiomeDecorator.init(seed, decoration_provider), }; } - // ========================================================================= - // Issue #147: Subsystem Accessors - // These 
provide direct access to modular subsystems for callers that need - // isolated functionality without the full generator. - // ========================================================================= - - /// Get the noise sampler subsystem for direct noise value access pub fn getNoiseSampler(self: *const OverworldGenerator) *const NoiseSampler { - return &self.noise_sampler; + return self.terrain_shape.getNoiseSampler(); } - /// Get the height sampler subsystem for terrain height computation pub fn getHeightSampler(self: *const OverworldGenerator) *const HeightSampler { - return &self.height_sampler; + return self.terrain_shape.getHeightSampler(); } - /// Get the surface builder subsystem for surface block placement pub fn getSurfaceBuilder(self: *const OverworldGenerator) *const SurfaceBuilder { - return &self.surface_builder; + return self.terrain_shape.getSurfaceBuilder(); } - /// Get the biome source subsystem for biome selection pub fn getBiomeSource(self: *const OverworldGenerator) *const BiomeSource { - return &self.biome_source; + return self.terrain_shape.getBiomeSource(); } - /// Get the world seed pub fn getSeed(self: *const OverworldGenerator) u64 { - return self.noise_sampler.getSeed(); + return self.terrain_shape.getSeed(); } - /// Get region info for a specific world position pub fn getRegionInfo(self: *const OverworldGenerator, world_x: i32, world_z: i32) RegionInfo { - return region_pkg.getRegion(self.continentalness_noise.params.seed, world_x, world_z); + return self.terrain_shape.getRegionInfo(world_x, world_z); } - /// Get region mood for a specific world position (Issue #110) pub fn getMood(self: *const OverworldGenerator, world_x: i32, world_z: i32) RegionMood { - const region = region_pkg.getRegion(self.continentalness_noise.params.seed, world_x, world_z); - return region.mood; + return self.getRegionInfo(world_x, world_z).mood; } pub fn getColumnInfo(self: *const OverworldGenerator, wx: f32, wz: f32) ColumnInfo { - const p = self.params; - 
const sea: f32 = @floatFromInt(p.sea_level); - const warp = self.computeWarp(wx, wz, 0); - const xw = wx + warp.x; - const zw = wz + warp.z; - const c = self.getContinentalness(xw, zw, 0); - const e = self.getErosion(xw, zw, 0); - const pv = self.getPeaksValleys(xw, zw, 0); - const coast_jitter = self.coast_jitter_noise.get2DOctaves(xw, zw, 2); - const c_jittered = clamp01(c + coast_jitter); - const river_mask = self.getRiverMask(xw, zw, 0); - // computeHeight now handles ocean vs land decision internally - const region = region_pkg.getRegion(self.continentalness_noise.params.seed, @as(i32, @intFromFloat(wx)), @as(i32, @intFromFloat(wz))); - const terrain_height = self.computeHeight(c_jittered, e, pv, xw, zw, river_mask, region, 0); // Full detail for physics - const ridge_mask = self.getRidgeFactor(xw, zw, c_jittered, 0); - const terrain_height_i: i32 = @intFromFloat(terrain_height); - const altitude_offset: f32 = @max(0, terrain_height - sea); - var temperature = self.getTemperature(xw, zw, 0); - temperature = clamp01(temperature - (altitude_offset / 512.0) * p.temp_lapse); - const humidity = self.getHumidity(xw, zw, 0); - const climate = biome_mod.computeClimateParams(temperature, humidity, terrain_height_i, c_jittered, e, p.sea_level, CHUNK_SIZE_Y); + const column = self.terrain_shape.sampleColumnData(wx, wz, 0); + const climate = self.terrain_shape.biome_source.computeClimate( + column.temperature, + column.humidity, + column.terrain_height_i, + column.continentalness, + column.erosion, + CHUNK_SIZE_Y, + ); - const slope: i32 = 1; const structural = biome_mod.StructuralParams{ - .height = terrain_height_i, - .slope = slope, - .continentalness = c_jittered, - .ridge_mask = ridge_mask, + .height = column.terrain_height_i, + .slope = 1, + .continentalness = column.continentalness, + .ridge_mask = column.ridge_mask, }; - const biome_id = biome_mod.selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); + const biome_id = 
self.terrain_shape.biome_source.selectBiome(climate, structural, column.river_mask); return .{ - .height = terrain_height_i, + .height = column.terrain_height_i, .biome = biome_id, - .is_ocean = c_jittered < p.ocean_threshold, - .temperature = temperature, - .humidity = humidity, - .continentalness = c_jittered, + .is_ocean = column.continentalness < self.terrain_shape.getOceanThreshold(), + .temperature = column.temperature, + .humidity = column.humidity, + .continentalness = column.continentalness, }; } - /// Check if classification cache should be recentered around player position. - /// Call this periodically (e.g., in LODManager.update or World.update). - /// Recentering invalidates the cache, so LOD chunks will fall back to - /// full-detail computation until LOD0 populates the cache again. - /// - /// Returns true if recentering occurred. pub fn maybeRecenterCache(self: *OverworldGenerator, player_x: i32, player_z: i32) bool { const dx = player_x - self.cache_center_x; const dz = player_z - self.cache_center_z; - - // Check if player has moved far enough from cache center if (dx * dx + dz * dz > CACHE_RECENTER_THRESHOLD * CACHE_RECENTER_THRESHOLD) { self.classification_cache.recenter(player_x, player_z); self.cache_center_x = player_x; @@ -390,481 +145,82 @@ pub const OverworldGenerator = struct { chunk.generated = false; const world_x = chunk.getWorldX(); const world_z = chunk.getWorldZ(); - const p = self.params; - const sea: f32 = @floatFromInt(p.sea_level); - // Issue #119 Phase 4: Ensure cache is centered near this chunk on first generation. - // This handles the case where player spawns far from (0,0). - // If chunk is outside cache bounds, recenter around it. 
if (!self.classification_cache.contains(world_x, world_z)) { self.classification_cache.recenter(world_x, world_z); self.cache_center_x = world_x; self.cache_center_z = world_z; } - var surface_heights: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32 = undefined; - var biome_ids: [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId = undefined; - var secondary_biome_ids: [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId = undefined; - var biome_blends: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var filler_depths: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32 = undefined; - var is_underwater_flags: [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool = undefined; // Any water (ocean or lake) - var is_ocean_water_flags: [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool = undefined; // True ocean (c < threshold) - var cave_region_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var debug_temperatures: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var debug_humidities: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var debug_continentalness: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var continentalness_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var erosion_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var ridge_masks: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var river_masks: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var temperatures: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - var humidities: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - - var local_z: u32 = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const wx: f32 = @floatFromInt(world_x + @as(i32, @intCast(local_x))); - const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); - const warp = self.computeWarp(wx, wz, 0); - const xw = wx + warp.x; - const zw = wz + warp.z; - const c = self.getContinentalness(xw, zw, 0); - const e_val = self.getErosion(xw, zw, 0); 
- const pv = self.getPeaksValleys(xw, zw, 0); - const coast_jitter = self.coast_jitter_noise.get2DOctaves(xw, zw, 2); - const c_jittered = clamp01(c + coast_jitter); - erosion_values[idx] = e_val; - const river_mask = self.getRiverMask(xw, zw, 0); - // Get Region Info (Mood + Role) - const region = region_pkg.getRegion(self.continentalness_noise.params.seed, @as(i32, @intFromFloat(wx)), @as(i32, @intFromFloat(wz))); - - // computeHeight now handles ocean vs land decision internally - const terrain_height = self.computeHeight(c_jittered, e_val, pv, xw, zw, river_mask, region, 0); // LOD0 - const ridge_mask = self.getRidgeFactor(xw, zw, c_jittered, 0); - const terrain_height_i: i32 = @intFromFloat(terrain_height); - const altitude_offset: f32 = @max(0, terrain_height - sea); - var temperature = self.getTemperature(xw, zw, 0); - temperature = clamp01(temperature - (altitude_offset / 512.0) * p.temp_lapse); - const humidity = self.getHumidity(xw, zw, 0); - debug_temperatures[idx] = temperature; - debug_humidities[idx] = humidity; - debug_continentalness[idx] = c_jittered; - temperatures[idx] = temperature; - humidities[idx] = humidity; - continentalness_values[idx] = c_jittered; - ridge_masks[idx] = ridge_mask; - river_masks[idx] = river_mask; - const is_underwater = terrain_height < sea; - const is_ocean_water = c_jittered < p.ocean_threshold; - surface_heights[idx] = terrain_height_i; - is_underwater_flags[idx] = is_underwater; - is_ocean_water_flags[idx] = is_ocean_water; - cave_region_values[idx] = self.cave_system.getCaveRegionValue(wx, wz); - } - } - - var slopes: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32 = undefined; - - local_z = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const terrain_h = surface_heights[idx]; - var max_slope: i32 = 0; - if (local_x > 0) max_slope = @max(max_slope, @as(i32, 
@intCast(@abs(terrain_h - surface_heights[idx - 1])))); - if (local_x < CHUNK_SIZE_X - 1) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - surface_heights[idx + 1])))); - if (local_z > 0) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - surface_heights[idx - CHUNK_SIZE_X])))); - if (local_z < CHUNK_SIZE_Z - 1) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - surface_heights[idx + CHUNK_SIZE_X])))); - slopes[idx] = max_slope; - } - } - - // === Phase B: Base Biome Selection === - // First pass: compute base biomes for all columns - local_z = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const terrain_height_i = surface_heights[idx]; - const temperature = temperatures[idx]; - const humidity = humidities[idx]; - const continentalness = continentalness_values[idx]; - const erosion = erosion_values[idx]; - const ridge_mask = ridge_masks[idx]; - const slope = slopes[idx]; - const river_mask = river_masks[idx]; - const climate = biome_mod.computeClimateParams(temperature, humidity, terrain_height_i, continentalness, erosion, p.sea_level, CHUNK_SIZE_Y); - - const structural = biome_mod.StructuralParams{ - .height = terrain_height_i, - .slope = slope, - .continentalness = continentalness, - .ridge_mask = ridge_mask, - }; - - const biome_id = biome_mod.selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); - biome_ids[idx] = biome_id; - secondary_biome_ids[idx] = biome_id; - biome_blends[idx] = 0.0; - } - } - - // === Phase B2: Edge Detection and Transition Biome Injection (Issue #102) === - // Use coarse grid sampling to detect biome boundaries and inject transition biomes - const EDGE_GRID_SIZE = CHUNK_SIZE_X / biome_mod.EDGE_STEP; // 4 cells for 16-block chunk - - // Optimization (Issue #119): Only run edge detection for close chunks - // This 
significantly improves loading performance at high render distances. - const player_dist_sq = (world_x - self.cache_center_x) * (world_x - self.cache_center_x) + - (world_z - self.cache_center_z) * (world_z - self.cache_center_z); - - if (player_dist_sq < 256 * 256) { // 16 chunks radius - // For each coarse grid cell, detect if we're near a biome edge - var gz: u32 = 0; - while (gz < EDGE_GRID_SIZE) : (gz += 1) { - if (stop_flag) |sf| if (sf.*) return; - var gx: u32 = 0; - while (gx < EDGE_GRID_SIZE) : (gx += 1) { - // Sample at the center of each grid cell - const sample_x = gx * biome_mod.EDGE_STEP + biome_mod.EDGE_STEP / 2; - const sample_z = gz * biome_mod.EDGE_STEP + biome_mod.EDGE_STEP / 2; - const sample_idx = sample_x + sample_z * CHUNK_SIZE_X; - const base_biome = biome_ids[sample_idx]; - - // Detect edge using world coordinates (allows sampling outside chunk) - const sample_wx = world_x + @as(i32, @intCast(sample_x)); - const sample_wz = world_z + @as(i32, @intCast(sample_z)); - const edge_info = self.detectBiomeEdge(sample_wx, sample_wz, base_biome); - - // If edge detected, apply transition biome to this grid cell - if (edge_info.edge_band != .none) { - if (edge_info.neighbor_biome) |neighbor| { - if (biome_mod.getTransitionBiome(base_biome, neighbor)) |transition_biome| { - // Apply transition biome to all blocks in this grid cell - var cell_z: u32 = 0; - while (cell_z < biome_mod.EDGE_STEP) : (cell_z += 1) { - var cell_x: u32 = 0; - while (cell_x < biome_mod.EDGE_STEP) : (cell_x += 1) { - const lx = gx * biome_mod.EDGE_STEP + cell_x; - const lz = gz * biome_mod.EDGE_STEP + cell_z; - if (lx < CHUNK_SIZE_X and lz < CHUNK_SIZE_Z) { - const cell_idx = lx + lz * CHUNK_SIZE_X; - // Store transition as primary, original as secondary for blending - secondary_biome_ids[cell_idx] = biome_ids[cell_idx]; - biome_ids[cell_idx] = transition_biome; - // Set blend factor based on edge band (inner = more blend) - biome_blends[cell_idx] = switch (edge_info.edge_band) 
{ - .inner => 0.3, // Closer to boundary: more original showing through - .middle => 0.2, - .outer => 0.1, - .none => 0.0, - }; - } - } - } - } - } - } - } - } - } - - // === Phase B3: Finalize biome data === - // Set biomes on chunk and compute filler depths - local_z = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const biome_id = biome_ids[idx]; - chunk.setBiome(local_x, local_z, biome_id); - - const biome_def = biome_mod.getBiomeDefinition(biome_id); - filler_depths[idx] = biome_def.surface.depth_range; - } - } - - var coastal_types: [CHUNK_SIZE_X * CHUNK_SIZE_Z]CoastalSurfaceType = undefined; - var exposure_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32 = undefined; - - // Compute structural coastal surface types (replaces shore_dist search - Issue #95) - local_z = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const wx: f32 = @floatFromInt(world_x + @as(i32, @intCast(local_x))); - const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); - exposure_values[idx] = self.beach_exposure_noise.get2DNormalizedOctaves(wx, wz, 2); - - // Use structural signals instead of distance search - const continentalness = continentalness_values[idx]; - const slope = slopes[idx]; - const height = surface_heights[idx]; - const erosion = erosion_values[idx]; - - coastal_types[idx] = self.getCoastalSurfaceType(continentalness, slope, height, erosion); - } - } + const phase_data = self.allocator.create(terrain_shape_mod.ChunkPhaseData) catch return; + defer self.allocator.destroy(phase_data); + if (!self.terrain_shape.prepareChunkPhaseData( + phase_data, + world_x, + world_z, + self.cache_center_x, + self.cache_center_z, + stop_flag, 
+ )) return; - // === Classification Cache Population (Issue #119 Phase 2) === - // Populate the classification cache for LOD generation to sample from. - // This ensures all LOD levels use the same biome/surface/water decisions. self.populateClassificationCache( world_x, world_z, - &surface_heights, - &biome_ids, - &continentalness_values, - &is_ocean_water_flags, - &coastal_types, + &phase_data.surface_heights, + &phase_data.biome_ids, + &phase_data.continentalness_values, + &phase_data.is_ocean_water_flags, + &phase_data.coastal_types, ); - var worm_carve_map = self.cave_system.generateWormCaves(chunk, &surface_heights, self.allocator) catch { - self.generateWithoutWormCavesInternal(chunk, &surface_heights, &biome_ids, &secondary_biome_ids, &biome_blends, &filler_depths, &is_underwater_flags, &is_ocean_water_flags, &cave_region_values, &coastal_types, &slopes, &exposure_values, sea, stop_flag); - return; - }; - defer worm_carve_map.deinit(); - - var debug_beach_count: u32 = 0; - local_z = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const terrain_height_i = surface_heights[idx]; - const filler_depth = filler_depths[idx]; - const is_underwater = is_underwater_flags[idx]; - const is_ocean_water = is_ocean_water_flags[idx]; - const cave_region = cave_region_values[idx]; - const coastal_type = coastal_types[idx]; - const wx: f32 = @floatFromInt(world_x + @as(i32, @intCast(local_x))); - const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); - - // Structural coastal surface detection (Issue #95) - const is_sand_beach = coastal_type == .sand_beach; - const is_gravel_beach = coastal_type == .gravel_beach; - const is_cliff = coastal_type == .cliff; - if (is_sand_beach or is_gravel_beach) debug_beach_count += 1; - - var y: i32 = 0; - const primary_biome_id = biome_ids[idx]; - const 
secondary_biome_id = secondary_biome_ids[idx]; - const blend = biome_blends[idx]; - const dither = self.detail_noise.noise.perlin2D(wx * 0.02, wz * 0.02) * 0.5 + 0.5; - const use_secondary = dither < blend; - const active_biome_id = if (use_secondary) secondary_biome_id else primary_biome_id; - const active_biome: Biome = @enumFromInt(@intFromEnum(active_biome_id)); + var worm_map_opt = self.terrain_shape.generateWormCaves( + chunk, + &phase_data.surface_heights, + self.allocator, + ) catch null; + defer if (worm_map_opt) |*map| map.deinit(); + const worm_map_ptr: ?*const terrain_shape_mod.CaveCarveMap = if (worm_map_opt) |*map| map else null; - // Populate chunk heightmap and biomes (Issue #107) - chunk.setSurfaceHeight(local_x, local_z, @intCast(terrain_height_i)); - chunk.biomes[idx] = active_biome_id; - - while (y < CHUNK_SIZE_Y) : (y += 1) { - var block = self.getBlockAt(y, terrain_height_i, active_biome, filler_depth, is_ocean_water, is_underwater, sea); - const is_surface = (y == terrain_height_i); - const is_near_surface = (y > terrain_height_i - 3 and y <= terrain_height_i); - - // Apply structural coastal surface types (ocean beaches only) - if (is_surface and block != .air and block != .water and block != .bedrock) { - if (is_sand_beach) { - block = .sand; - } else if (is_gravel_beach) { - block = .gravel; - } else if (is_cliff) { - block = .stone; - } - } else if (is_near_surface and (is_sand_beach or is_gravel_beach) and block == .dirt) { - block = if (is_gravel_beach) .gravel else .sand; - } - if (block != .air and block != .water and block != .bedrock) { - const wy: f32 = @floatFromInt(y); - const should_carve_worm = worm_carve_map.get(local_x, @intCast(y), local_z); - // Use updated multi-algorithm cave system (Issue #108) - const should_carve_cavity = self.cave_system.shouldCarve(wx, wy, wz, terrain_height_i, cave_region); - if (should_carve_worm or should_carve_cavity) { - block = if (y < p.sea_level) .water else .air; - } - } - 
chunk.setBlock(local_x, @intCast(y), local_z, block); - } - } - } + if (!self.terrain_shape.fillChunkBlocks(chunk, phase_data, worm_map_ptr, stop_flag)) return; if (stop_flag) |sf| if (sf.*) return; - self.generateOres(chunk); + self.biome_decorator.generateOres(chunk); if (stop_flag) |sf| if (sf.*) return; - self.generateFeatures(chunk); + self.biome_decorator.generateFeatures(chunk, self.terrain_shape.getNoiseSampler()); if (stop_flag) |sf| if (sf.*) return; - self.computeSkylight(chunk); + LightingComputer.computeSkylight(chunk); if (stop_flag) |sf| if (sf.*) return; - self.computeBlockLight(chunk) catch |err| { - std.debug.print("Failed to compute block light: {}\n", .{err}); + LightingComputer.computeBlockLight(chunk, self.allocator) catch |err| { + log.log.err("Failed to compute block light for chunk ({}, {}): {}", .{ chunk.chunk_x, chunk.chunk_z, err }); + return; }; + chunk.generated = true; chunk.dirty = true; - self.printDebugStats(world_x, world_z, &debug_temperatures, &debug_humidities, &debug_continentalness, &biome_ids, debug_beach_count); } - fn generateWithoutWormCavesInternal(self: *const OverworldGenerator, chunk: *Chunk, surface_heights: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, biome_ids: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId, secondary_biome_ids: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId, biome_blends: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, filler_depths: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, is_underwater_flags: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool, is_ocean_water_flags: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool, cave_region_values: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, coastal_types: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]CoastalSurfaceType, slopes: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, exposure_values: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, sea: f32, stop_flag: ?*const bool) void { - _ = exposure_values; - _ = slopes; - const world_x = chunk.getWorldX(); - const world_z = chunk.getWorldZ(); - const p = self.params; - var 
local_z: u32 = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - if (stop_flag) |sf| if (sf.*) return; - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const idx = local_x + local_z * CHUNK_SIZE_X; - const terrain_height_i = surface_heights[idx]; - const filler_depth = filler_depths[idx]; - const is_underwater = is_underwater_flags[idx]; - const is_ocean_water = is_ocean_water_flags[idx]; - const cave_region = cave_region_values[idx]; - const coastal_type = coastal_types[idx]; - const wx: f32 = @floatFromInt(world_x + @as(i32, @intCast(local_x))); - const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); - - // Structural coastal surface detection (Issue #95) - const is_sand_beach = coastal_type == .sand_beach; - const is_gravel_beach = coastal_type == .gravel_beach; - const is_cliff = coastal_type == .cliff; - - var y: i32 = 0; - const primary_biome_id = biome_ids[idx]; - const secondary_biome_id = secondary_biome_ids[idx]; - const blend = biome_blends[idx]; - const dither = self.detail_noise.noise.perlin2D(wx * 0.02, wz * 0.02) * 0.5 + 0.5; - const use_secondary = dither < blend; - const active_biome_id = if (use_secondary) secondary_biome_id else primary_biome_id; - const active_biome: Biome = @enumFromInt(@intFromEnum(active_biome_id)); - - // Populate chunk heightmap and biomes (Issue #107) - chunk.setSurfaceHeight(local_x, local_z, @intCast(terrain_height_i)); - chunk.biomes[idx] = active_biome_id; - - while (y < CHUNK_SIZE_Y) : (y += 1) { - var block = self.getBlockAt(y, terrain_height_i, active_biome, filler_depth, is_ocean_water, is_underwater, sea); - const is_surface = (y == terrain_height_i); - const is_near_surface = (y > terrain_height_i - 3 and y <= terrain_height_i); - - // Apply structural coastal surface types (ocean beaches only) - if (is_surface and block != .air and block != .water and block != .bedrock) { - if (is_sand_beach) { - block = .sand; - } else if (is_gravel_beach) { - block = .gravel; 
- } else if (is_cliff) { - block = .stone; - } - } else if (is_near_surface and (is_sand_beach or is_gravel_beach) and block == .dirt) { - block = if (is_gravel_beach) .gravel else .sand; - } - if (block != .air and block != .water and block != .bedrock) { - const wy: f32 = @floatFromInt(y); - if (self.cave_system.shouldCarve(wx, wy, wz, terrain_height_i, cave_region)) { - block = if (y < p.sea_level) .water else .air; - } - } - chunk.setBlock(local_x, @intCast(y), local_z, block); - } - } - } - if (stop_flag) |sf| if (sf.*) return; - self.generateOres(chunk); - if (stop_flag) |sf| if (sf.*) return; - self.generateFeatures(chunk); - if (stop_flag) |sf| if (sf.*) return; - self.computeSkylight(chunk); - if (stop_flag) |sf| if (sf.*) return; - self.computeBlockLight(chunk) catch {}; - chunk.generated = true; - chunk.dirty = true; + pub fn generateFeatures(self: *const OverworldGenerator, chunk: *Chunk) void { + self.biome_decorator.generateFeatures(chunk, self.terrain_shape.getNoiseSampler()); } - fn printDebugStats(self: *const OverworldGenerator, world_x: i32, world_z: i32, t_vals: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, h_vals: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, c_vals: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, b_ids: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId, beach_count: u32) void { - // Debug output disabled by default. Set to true to enable debugging. 
- const debug_enabled = false; - if (!debug_enabled) return; + pub fn isOceanWater(self: *const OverworldGenerator, wx: f32, wz: f32) bool { + return self.terrain_shape.isOceanWater(wx, wz); + } - const chunk_id = @as(u32, @bitCast(world_x)) +% @as(u32, @bitCast(world_z)); - if (chunk_id % 64 != 0) return; - var t_min: f32 = 1.0; - var t_max: f32 = 0.0; - var t_sum: f32 = 0.0; - var h_min: f32 = 1.0; - var h_max: f32 = 0.0; - var h_sum: f32 = 0.0; - var c_min: f32 = 1.0; - var c_max: f32 = 0.0; - var c_sum: f32 = 0.0; - var biome_counts: [21]u32 = [_]u32{0} ** 21; - var zone_counts: [6]u32 = [_]u32{0} ** 6; - var t_hot: u32 = 0; - var h_dry: u32 = 0; - for (0..CHUNK_SIZE_X * CHUNK_SIZE_Z) |i| { - t_min = @min(t_min, t_vals[i]); - t_max = @max(t_max, t_vals[i]); - t_sum += t_vals[i]; - h_min = @min(h_min, h_vals[i]); - h_max = @max(h_max, h_vals[i]); - h_sum += h_vals[i]; - c_min = @min(c_min, c_vals[i]); - c_max = @max(c_max, c_vals[i]); - c_sum += c_vals[i]; - if (t_vals[i] > 0.7) t_hot += 1; - if (h_vals[i] < 0.25) h_dry += 1; - const bid = @intFromEnum(b_ids[i]); - if (bid < 21) biome_counts[bid] += 1; - const zone = self.getContinentalZone(c_vals[i]); - const zone_idx: u32 = @intFromEnum(zone); - if (zone_idx < 6) zone_counts[zone_idx] += 1; - } - const n: f32 = @floatFromInt(CHUNK_SIZE_X * CHUNK_SIZE_Z); - std.debug.print("\n=== WORLDGEN DEBUG @ chunk ({}, {}) ===\n", .{ world_x, world_z }); - std.debug.print("T: min={d:.2} max={d:.2} avg={d:.2} | hot(>0.7): {}%\n", .{ t_min, t_max, t_sum / n, t_hot * 100 / @as(u32, @intCast(CHUNK_SIZE_X * CHUNK_SIZE_Z)) }); - std.debug.print("H: min={d:.2} max={d:.2} avg={d:.2} | dry(<0.25): {}%\n", .{ h_min, h_max, h_sum / n, h_dry * 100 / @as(u32, @intCast(CHUNK_SIZE_X * CHUNK_SIZE_Z)) }); - std.debug.print("C: min={d:.2} max={d:.2} avg={d:.2}\n", .{ c_min, c_max, c_sum / n }); - std.debug.print("Beach triggers: {} / {}\n", .{ beach_count, CHUNK_SIZE_X * CHUNK_SIZE_Z }); - std.debug.print("Continental Zones: ", .{}); - for 
(zone_counts, 0..) |count, zi| { - if (count > 0) { - const zone: ContinentalZone = @enumFromInt(@as(u8, @intCast(zi))); - std.debug.print("{s}={} ", .{ zone.name(), count }); - } - } - std.debug.print("\n", .{}); - std.debug.print("Biomes: ", .{}); - const biome_names = [_][]const u8{ "deep_ocean", "ocean", "beach", "plains", "forest", "taiga", "desert", "snow_tundra", "mountains", "snowy_mountains", "river", "swamp", "mangrove", "jungle", "savanna", "badlands", "mushroom", "foothills", "marsh", "dry_plains", "coastal" }; - for (biome_counts, 0..) |count, bi| { - if (count > 0) std.debug.print("{s}={} ", .{ biome_names[bi], count }); - } - std.debug.print("\n", .{}); + pub fn isInlandWater(self: *const OverworldGenerator, wx: f32, wz: f32, height: i32) bool { + return self.terrain_shape.isInlandWater(wx, wz, height); } - // ============================================================================ - // LOD Heightmap Generation (Issue #119 - Classification Cache) - // ============================================================================ + pub fn getContinentalZone(self: *const OverworldGenerator, c: f32) ContinentalZone { + return self.terrain_shape.getContinentalZone(c); + } /// Generate heightmap data only (for LODSimplifiedData) /// Uses classification cache when available to ensure LOD matches LOD0. 
pub fn generateHeightmapOnly(self: *const OverworldGenerator, data: *LODSimplifiedData, region_x: i32, region_z: i32, lod_level: LODLevel) void { - // Cell size now depends on both LOD level and grid size const block_step = LODSimplifiedData.getCellSizeBlocks(lod_level); const world_x = region_x * @as(i32, @intCast(lod_level.regionSizeBlocks())); const world_z = region_z * @as(i32, @intCast(lod_level.regionSizeBlocks())); - const p = self.params; + const sea_level = self.terrain_shape.getSeaLevel(); var gz: u32 = 0; while (gz < data.width) : (gz += 1) { @@ -875,86 +231,44 @@ pub const OverworldGenerator = struct { const wz_i = world_z + @as(i32, @intCast(gz * block_step)); const wx: f32 = @floatFromInt(wx_i); const wz: f32 = @floatFromInt(wz_i); - - // Compute octave reduction from LOD level const reduction: u8 = @intCast(@intFromEnum(lod_level)); + const column = self.terrain_shape.sampleColumnData(wx, wz, reduction); + + data.heightmap[idx] = column.terrain_height; - // === Issue #119: Try classification cache first === - // If this position was generated at LOD0, use the cached values - // to ensure biome/surface consistency across all LOD levels. 
if (self.classification_cache.get(wx_i, wz_i)) |cached| { - // Use cached biome and surface type from LOD0 generation data.biomes[idx] = cached.biome_id; data.top_blocks[idx] = self.surfaceTypeToBlock(cached.surface_type); data.colors[idx] = biome_mod.getBiomeColor(cached.biome_id); + continue; + } - // Still need to compute height (it's always needed for mesh) - // Use reduction to ensure smooth distant terrain even if cached - const warp = self.computeWarp(wx, wz, reduction); - const xw = wx + warp.x; - const zw = wz + warp.z; - - const c = self.getContinentalness(xw, zw, reduction); - const e_val = self.getErosion(xw, zw, reduction); - const pv = self.getPeaksValleys(xw, zw, reduction); - const river_mask = self.getRiverMask(xw, zw, reduction); - const region_info = region_pkg.getRegion(self.continentalness_noise.params.seed, wx_i, wz_i); - - const cj_octaves: u16 = if (2 > reduction) 2 - @as(u16, reduction) else 1; - const coast_jitter = self.coast_jitter_noise.get2DOctaves(xw, zw, cj_octaves); - const c_jittered = clamp01(c + coast_jitter); - - const terrain_height = self.computeHeight(c_jittered, e_val, pv, xw, zw, river_mask, region_info, reduction); - data.heightmap[idx] = terrain_height; - } else { - // === Fallback: Compute from scratch === - const warp = self.computeWarp(wx, wz, reduction); - const xw = wx + warp.x; - const zw = wz + warp.z; - - const c = self.getContinentalness(xw, zw, reduction); - const e_val = self.getErosion(xw, zw, reduction); - const pv = self.getPeaksValleys(xw, zw, reduction); - const river_mask = self.getRiverMask(xw, zw, reduction); - const region_info = region_pkg.getRegion(self.continentalness_noise.params.seed, wx_i, wz_i); - - const cj_octaves: u16 = if (2 > reduction) 2 - @as(u16, reduction) else 1; - const coast_jitter = self.coast_jitter_noise.get2DOctaves(xw, zw, cj_octaves); - const c_jittered = clamp01(c + coast_jitter); - - const terrain_height = self.computeHeight(c_jittered, e_val, pv, xw, zw, river_mask, 
region_info, reduction); - data.heightmap[idx] = terrain_height; - const terrain_height_i: i32 = @intFromFloat(terrain_height); - const is_ocean_water = c_jittered < p.ocean_threshold; - - // Compute climate and pick biome - const altitude_offset: f32 = @max(0, terrain_height - @as(f32, @floatFromInt(p.sea_level))); - var temp = self.getTemperature(xw, zw, reduction); - temp = clamp01(temp - (altitude_offset / 512.0) * p.temp_lapse); - const hum = self.getHumidity(xw, zw, reduction); - - const climate = biome_mod.computeClimateParams(temp, hum, terrain_height_i, c_jittered, e_val, p.sea_level, 256); + const climate = biome_mod.computeClimateParams( + column.temperature, + column.humidity, + column.terrain_height_i, + column.continentalness, + column.erosion, + sea_level, + CHUNK_SIZE_Y, + ); - const ridge_mask = self.getRidgeFactor(xw, zw, c_jittered, reduction); - const structural = biome_mod.StructuralParams{ - .height = terrain_height_i, - .slope = 0, - .continentalness = c_jittered, - .ridge_mask = ridge_mask, - }; + const structural = biome_mod.StructuralParams{ + .height = column.terrain_height_i, + .slope = 0, + .continentalness = column.continentalness, + .ridge_mask = column.ridge_mask, + }; - const biome_id = biome_mod.selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); - data.biomes[idx] = biome_id; - data.top_blocks[idx] = self.getSurfaceBlock(biome_id, is_ocean_water); - data.colors[idx] = biome_mod.getBiomeColor(biome_id); - } + const biome_id = biome_mod.selectBiomeWithConstraintsAndRiver(climate, structural, column.river_mask); + data.biomes[idx] = biome_id; + data.top_blocks[idx] = self.getSurfaceBlock(biome_id, column.is_ocean); + data.colors[idx] = biome_mod.getBiomeColor(biome_id); } } } - /// Convert SurfaceType enum to BlockType for LOD rendering - fn surfaceTypeToBlock(self: *const OverworldGenerator, surface_type: SurfaceType) BlockType { - _ = self; + fn surfaceTypeToBlock(_: *const OverworldGenerator, surface_type: 
SurfaceType) BlockType { return switch (surface_type) { .grass => .grass, .sand => .sand, @@ -966,10 +280,8 @@ pub const OverworldGenerator = struct { }; } - fn getSurfaceBlock(self: *const OverworldGenerator, biome_id: BiomeId, is_ocean: bool) BlockType { - _ = self; + fn getSurfaceBlock(_: *const OverworldGenerator, biome_id: BiomeId, is_ocean: bool) BlockType { if (is_ocean) return .sand; - return switch (biome_id) { .desert, .badlands => .sand, .snow_tundra, .snowy_mountains => .snow_block, @@ -978,716 +290,6 @@ pub const OverworldGenerator = struct { }; } - /// Generate chunk without worm caves (for LOD1 or when worms disabled) - fn generateWithoutWormCaves(self: *const OverworldGenerator, chunk: *Chunk, stop_flag: ?*const bool) void { - // Call the existing internal function with default/empty worm map - // For now, just call the regular generate - in the future this would skip worm generation - self.generate(chunk, stop_flag); - } - - fn computeWarp(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) struct { x: f32, z: f32 } { - const octaves: u16 = if (3 > reduction) 3 - @as(u16, reduction) else 1; - const offset_x = self.warp_noise_x.get2DOctaves(x, z, octaves); - const offset_z = self.warp_noise_z.get2DOctaves(x, z, octaves); - return .{ .x = offset_x, .z = offset_z }; - } - - fn getContinentalness(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - // Slow octave reduction for structure - const octaves: u16 = if (4 > (reduction / 2)) 4 - @as(u16, (reduction / 2)) else 2; - const val = self.continentalness_noise.get2DOctaves(x, z, octaves); - return (val + 1.0) * 0.5; - } - - /// Map continentalness value (0-1) to explicit zone - /// Updated to match STRUCTURE-FIRST thresholds - pub fn getContinentalZone(self: *const OverworldGenerator, c: f32) ContinentalZone { - const p = self.params; - if (c < p.continental_deep_ocean_max) { // 0.20 - return .deep_ocean; - } else if (c < p.ocean_threshold) { // 0.30 - HARD ocean cutoff 
- return .ocean; - } else if (c < p.continental_coast_max) { // 0.55 - return .coast; - } else if (c < p.continental_inland_low_max) { // 0.75 - return .inland_low; - } else if (c < p.continental_inland_high_max) { // 0.90 - return .inland_high; - } else { - return .mountain_core; - } - } - - fn getErosion(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - const octaves: u16 = if (4 > (reduction / 2)) 4 - @as(u16, (reduction / 2)) else 2; - const val = self.erosion_noise.get2DOctaves(x, z, octaves); - return (val + 1.0) * 0.5; - } - - fn getPeaksValleys(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - // Ridged noise also needs reduction - const octaves: u16 = if (5 > reduction) 5 - @as(u16, reduction) else 1; - // Peaks noise is not configurednoise in original code? Wait, it is now. - // But ridged2D isn't in ConfiguredNoise. - // I should add ridged2D to ConfiguredNoise or use noise directly. - // For now, use noise directly but with reduced octaves. 
- return self.peaks_noise.noise.ridged2D(x, z, octaves, 2.0, 0.5, self.params.peaks_scale); - } - - fn getTemperature(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - const p = self.params; - const macro_octaves: u16 = if (3 > (reduction / 2)) 3 - @as(u16, (reduction / 2)) else 2; - const local_octaves: u16 = if (2 > reduction) 2 - @as(u16, reduction) else 1; - const macro = self.temperature_noise.get2DNormalizedOctaves(x, z, macro_octaves); - const local = self.temperature_local_noise.get2DNormalizedOctaves(x, z, local_octaves); - var t = p.climate_macro_weight * macro + (1.0 - p.climate_macro_weight) * local; - t = (t - 0.5) * 2.2 + 0.5; - return clamp01(t); - } - - fn getHumidity(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - const p = self.params; - const macro_octaves: u16 = if (3 > (reduction / 2)) 3 - @as(u16, (reduction / 2)) else 2; - const local_octaves: u16 = if (2 > reduction) 2 - @as(u16, reduction) else 1; - const macro = self.humidity_noise.get2DNormalizedOctaves(x, z, macro_octaves); - const local = self.humidity_local_noise.get2DNormalizedOctaves(x, z, local_octaves); - var h = p.climate_macro_weight * macro + (1.0 - p.climate_macro_weight) * local; - h = (h - 0.5) * 2.2 + 0.5; - return clamp01(h); - } - - fn getMountainMask(self: *const OverworldGenerator, pv: f32, e: f32, c: f32) f32 { - const p = self.params; - const inland = smoothstep(p.mount_inland_min, p.mount_inland_max, c); - const peak_factor = smoothstep(p.mount_peak_min, p.mount_peak_max, pv); - const rugged_factor = 1.0 - smoothstep(p.mount_rugged_min, p.mount_rugged_max, e); - return inland * peak_factor * rugged_factor; - } - - fn getRidgeFactor(self: *const OverworldGenerator, x: f32, z: f32, c: f32, reduction: u8) f32 { - const p = self.params; - const inland_factor = smoothstep(p.ridge_inland_min, p.ridge_inland_max, c); - const octaves: u32 = if (5 > reduction) 5 - reduction else 1; - const ridge_val = 
self.ridge_noise.noise.ridged2D(x, z, octaves, 2.0, 0.5, p.ridge_scale); - const sparsity_mask = smoothstep(p.ridge_sparsity - 0.15, p.ridge_sparsity + 0.15, ridge_val); - return inland_factor * sparsity_mask * ridge_val; - } - - /// Base height from continentalness - only called for LAND (c >= ocean_threshold) - fn getBaseHeight(self: *const OverworldGenerator, c: f32) f32 { - const p = self.params; - const sea: f32 = @floatFromInt(p.sea_level); - - // Coastal zone: 0.35 to 0.42 - rises from sea level - if (c < p.continental_coast_max) { - const range = p.continental_coast_max - p.ocean_threshold; - const t = (c - p.ocean_threshold) / range; - return sea + t * 8.0; // 0 to +8 blocks - } - - // Inland Low: 0.42 to 0.60 - plains/forests - if (c < p.continental_inland_low_max) { - const range = p.continental_inland_low_max - p.continental_coast_max; - const t = (c - p.continental_coast_max) / range; - return sea + 8.0 + t * 12.0; // +8 to +20 - } - - // Inland High: 0.60 to 0.75 - hills - if (c < p.continental_inland_high_max) { - const range = p.continental_inland_high_max - p.continental_inland_low_max; - const t = (c - p.continental_inland_low_max) / range; - return sea + 20.0 + t * 15.0; // +20 to +35 - } - - // Mountain Core: > 0.75 - const t = smoothstep(p.continental_inland_high_max, 1.0, c); - return sea + 35.0 + t * 25.0; // +35 to +60 - } - - /// STRUCTURE-FIRST height computation with V7-style multi-layer terrain. - /// The KEY change: Ocean is decided by continentalness ALONE. - /// Land uses blended terrain layers for varied terrain character. - /// Region constraints suppress/exaggerate features per role. 
- fn computeHeight(self: *const OverworldGenerator, c: f32, e: f32, pv: f32, x: f32, z: f32, river_mask: f32, region: RegionInfo, reduction: u8) f32 { - const p = self.params; - const sea: f32 = @floatFromInt(p.sea_level); - - // ============================================================ - // STEP 1: HARD OCEAN DECISION - // If continentalness < ocean_threshold, this is OCEAN. - // Return ocean depth and STOP. No land logic runs here. - // ============================================================ - if (c < p.ocean_threshold) { - // Ocean depth varies smoothly with continentalness - // c=0.0 -> deepest (-50 from sea) - // c=ocean_threshold -> shallow (-15 from sea) - const ocean_depth_factor = c / p.ocean_threshold; // 0..1 within ocean - const deep_ocean_depth = sea - 55.0; - const shallow_ocean_depth = sea - 12.0; - - // Very minimal seabed variation - oceans should be BORING - const sb_octaves: u32 = if (2 > reduction) 2 - reduction else 1; - const seabed_detail = self.seabed_noise.get2DOctaves(x, z, @intCast(sb_octaves)); - - return std.math.lerp(deep_ocean_depth, shallow_ocean_depth, ocean_depth_factor) + seabed_detail; - } - - // ============================================================ - // STEP 2: PATH SYSTEM (Priority Override) - // Movement paths override region suppression locally - // ============================================================ - const path_info = region_pkg.getPathInfo(self.continentalness_noise.params.seed, @as(i32, @intFromFloat(x)), @as(i32, @intFromFloat(z)), region); - var path_depth: f32 = 0.0; - var slope_suppress: f32 = 0.0; - - switch (path_info.path_type) { - .valley => { - // Valleys: lower terrain and reduce slope - path_depth = path_info.influence * VALLEY_DEPTH; - slope_suppress = path_info.influence * 0.6; - }, - .river => { - // Rivers: deeper channel - path_depth = path_info.influence * 15.0; - slope_suppress = path_info.influence * 0.8; - }, - .plains_corridor => { - // Plains corridors: very gentle - 
path_depth = path_info.influence * 2.0; - slope_suppress = path_info.influence * 0.9; - }, - .none => {}, - } - - // ============================================================ - // STEP 3: V7-STYLE MULTI-LAYER TERRAIN (Issue #105) - // Blend terrain_base and terrain_alt using height_select - // This creates varied terrain where different areas have - // noticeably different character (rolling vs flat vs hilly) - // ============================================================ - // Distant terrain uses reduced octaves to prevent aliasing (grainy look) - const base_height = self.terrain_base.get2DOctaves(x, z, self.terrain_base.params.octaves -| reduction); - const alt_height = self.terrain_alt.get2DOctaves(x, z, self.terrain_alt.params.octaves -| reduction); - const select = self.height_select.get2DOctaves(x, z, self.height_select.params.octaves -| reduction); - const persist = self.terrain_persist.get2DOctaves(x, z, self.terrain_persist.params.octaves -| reduction); - - // Apply persistence variation to both heights - const base_modulated = base_height * persist; - const alt_modulated = alt_height * persist; - - // Blend between base and alt using height_select - // select near 0 = more base terrain (rolling hills) - // select near 1 = more alt terrain (flatter) - const blend = clamp01((select + 8.0) / 16.0); - - // Apply region height multiplier - const mood_mult = region_pkg.getHeightMultiplier(region); - const v7_terrain = std.math.lerp(base_modulated, alt_modulated, blend) * mood_mult; - - // ============================================================ - // STEP 4: LAND - Combine V7 terrain with continental base - // Only reaches here if c >= ocean_threshold - // ============================================================ - var height = self.getBaseHeight(c) + v7_terrain - path_depth; - - // ============================================================ - // STEP 5: Mountains & Ridges - REGION-CONSTRAINED - // Only apply if allowHeightDrama is true - // 
============================================================ - if (region_pkg.allowHeightDrama(region) and c > p.continental_inland_low_max) { - const m_mask = self.getMountainMask(pv, e, c); - const lift_octaves: u32 = if (3 > reduction) 3 - reduction else 1; - const lift_noise = (self.mountain_lift_noise.get2DOctaves(x, z, @intCast(lift_octaves)) + 1.0) * 0.5; - const mount_lift = (m_mask * lift_noise * p.mount_amp) / (1.0 + (m_mask * lift_noise * p.mount_amp) / p.mount_cap); - height += mount_lift * mood_mult; - - const ridge_val = self.getRidgeFactor(x, z, c, reduction); - height += ridge_val * p.ridge_amp * mood_mult; - } - - // ============================================================ - // STEP 6: Fine Detail - Attenuated by slope suppression - // ============================================================ - const erosion_smooth = smoothstep(0.5, 0.75, e); - const land_factor = smoothstep(p.continental_coast_max, p.continental_inland_low_max, c); - const hills_atten = (1.0 - erosion_smooth) * land_factor * (1.0 - slope_suppress); - - // Small-scale detail (every ~32 blocks) - const elev01 = clamp01((height - sea) / p.highland_range); - const detail_atten = 1.0 - smoothstep(0.3, 0.85, elev01); - - // Dampen detail for LODs to prevent graininess - const det_octaves: u32 = if (3 > reduction) 3 - reduction else 1; - const detail_lod_mult = (1.0 - 0.25 * @as(f32, @floatFromInt(reduction))); - const detail = self.detail_noise.get2DOctaves(x, z, @intCast(det_octaves)) * detail_lod_mult; - height += detail * detail_atten * hills_atten * mood_mult; - - // ============================================================ - // STEP 7: Post-Processing - Peak compression - // ============================================================ - const peak_start = sea + p.peak_compression_offset; - if (height > peak_start) { - const h_above = height - peak_start; - const compressed = p.peak_compression_range * (1.0 - std.math.exp(-h_above / p.peak_compression_range)); - height = 
peak_start + compressed; - } - - // ============================================================ - // STEP 8: River Carving - REGION-CONSTRAINED - // Only if allowRiver is true - // ============================================================ - if (region_pkg.allowRiver(region) and river_mask > 0.001 and c > p.continental_coast_max) { - const river_bed = sea - 4.0; - const carve_alpha = smoothstep(0.0, 1.0, river_mask); - if (height > river_bed) { - height = std.math.lerp(height, river_bed, carve_alpha); - } - } - - return height; - } - - fn getRiverMask(self: *const OverworldGenerator, x: f32, z: f32, reduction: u8) f32 { - const p = self.params; - const octaves: u32 = if (4 > reduction) 4 - reduction else 1; - const r = self.river_noise.noise.ridged2D(x, z, octaves, 2.0, 0.5, p.river_scale); - const river_val = 1.0 - r; - return smoothstep(p.river_min, p.river_max, river_val); - } - - // CoastalSurfaceType is now imported from surface_builder.zig (Issue #147) - - /// Determine coastal surface type based on structural signals - /// - /// KEY FIX (Issue #92): Beach requires adjacency to OCEAN water, not just any water. - /// - Ocean water: continentalness < ocean_threshold (0.30) - /// - Inland water (lakes/rivers): continentalness >= ocean_threshold but below sea level - /// - /// Beach forms ONLY when: - /// 1. This block is LAND (above sea level) - /// 2. This block is near OCEAN (continentalness indicates ocean proximity) - /// 3. Height is within beach_max_height_above_sea of sea level - /// 4. Slope is gentle - /// - /// Inland water (lakes/rivers) get grass/dirt banks, NOT sand. 
- pub fn getCoastalSurfaceType(self: *const OverworldGenerator, continentalness: f32, slope: i32, height: i32, erosion: f32) CoastalSurfaceType { - const p = self.params; - const sea_level = p.sea_level; - - // CONSTRAINT 1: Height above sea level - // Beaches only exist in a tight band around sea level - const height_above_sea = height - sea_level; - - // If underwater or more than 3 blocks above sea, never a beach - if (height_above_sea < -1 or height_above_sea > p.beach_max_height_above_sea) { - return .none; - } - - // CONSTRAINT 2: Must be adjacent to OCEAN - // Beach only in a VERY narrow band just above ocean threshold - const beach_band = 0.05; // Only 0.05 continentalness = ~100 blocks at this scale - const near_ocean = continentalness >= p.ocean_threshold and - continentalness < (p.ocean_threshold + beach_band); - - if (!near_ocean) { - return .none; - } - - // CONSTRAINT 3: Classify based on slope and erosion - // Steep slopes become cliffs (stone) - if (slope >= p.cliff_min_slope) { - return .cliff; - } - - // High erosion areas become gravel beaches - if (erosion >= p.gravel_erosion_threshold and slope <= p.beach_max_slope + 1) { - return .gravel_beach; - } - - // Gentle slopes at sea level become sand beaches - if (slope <= p.beach_max_slope) { - return .sand_beach; - } - - // Moderate slopes - no special treatment - return .none; - } - - /// Check if a position is ocean water (used for beach adjacency checks) - /// Ocean = continentalness < ocean_threshold (structure-first definition) - pub fn isOceanWater(self: *const OverworldGenerator, wx: f32, wz: f32) bool { - const p = self.params; - const warp = self.computeWarp(wx, wz, 0); - const xw = wx + warp.x; - const zw = wz + warp.z; - const c = self.getContinentalness(xw, zw, 0); - - // Ocean is defined by continentalness alone in structure-first approach - return c < p.ocean_threshold; - } - - /// Check if a position is inland water (lake/river) - /// Inland water = underwater BUT continentalness >= 
ocean_threshold - pub fn isInlandWater(self: *const OverworldGenerator, wx: f32, wz: f32, height: i32) bool { - const p = self.params; - const warp = self.computeWarp(wx, wz, 0); - const xw = wx + warp.x; - const zw = wz + warp.z; - const c = self.getContinentalness(xw, zw, 0); - - // Inland water: below sea level but in a land zone - return height < p.sea_level and c >= p.ocean_threshold; - } - - /// Get block type at a specific Y coordinate - /// - /// KEY FIX: Distinguish between ocean floor and inland water floor: - /// - Ocean floor: sand in shallow water, gravel/clay in deep water - /// - Inland water floor (lakes/rivers): dirt/gravel, NOT sand (no lake beaches) - fn getBlockAt(self: *const OverworldGenerator, y: i32, terrain_height: i32, biome: Biome, filler_depth: i32, is_ocean_water: bool, is_underwater: bool, sea: f32) BlockType { - _ = self; - const sea_level: i32 = @intFromFloat(sea); - if (y == 0) return .bedrock; - if (y > terrain_height) { - if (y <= sea_level) return .water; - return .air; - } - - // Ocean floor: sand in shallow water, clay/gravel in deep - if (is_ocean_water and is_underwater and y == terrain_height) { - const depth: f32 = sea - @as(f32, @floatFromInt(terrain_height)); - if (depth <= 12) return .sand; // Shallow ocean: sand - if (depth <= 30) return .clay; // Medium depth: clay - return .gravel; // Deep: gravel - } - // Ocean shallow underwater filler for continuity - if (is_ocean_water and is_underwater and y > terrain_height - 3) { - const depth: f32 = sea - @as(f32, @floatFromInt(terrain_height)); - if (depth <= 12) return .sand; - } - - // INLAND WATER (lakes/rivers): dirt/gravel banks, NOT sand - // This prevents "lake beaches" - inland water should look natural - if (!is_ocean_water and is_underwater and y == terrain_height) { - const depth: f32 = sea - @as(f32, @floatFromInt(terrain_height)); - if (depth <= 8) return .dirt; // Shallow lake: dirt banks - if (depth <= 20) return .gravel; // Medium: gravel - return .clay; // 
Deep lake: clay - } - - if (y == terrain_height) { - // Elevation-aware surface morphing (Issue #110) - // Plains -> Grassland (low) -> Rolling Hills (mid) -> Windswept/Rocky (high) - if (biome == .plains) { - if (y > 110) return .stone; // High windswept areas - if (y > 90) return .gravel; // Transition - } - // Forest -> Standard -> Rocky peaks - if (biome == .forest) { - if (y > 120) return .stone; - } - - if (biome == .snowy_mountains or biome == .snow_tundra) return .snow_block; - return biome.getSurfaceBlock(); - } - if (y > terrain_height - filler_depth) return biome.getFillerBlock(); - return .stone; - } - - fn generateOres(self: *const OverworldGenerator, chunk: *Chunk) void { - var prng = std.Random.DefaultPrng.init(self.erosion_noise.params.seed +% @as(u64, @bitCast(@as(i64, chunk.chunk_x))) *% 59381 +% @as(u64, @bitCast(@as(i64, chunk.chunk_z))) *% 28411); - const random = prng.random(); - self.placeOreVeins(chunk, .coal_ore, 20, 6, 10, 128, random); - self.placeOreVeins(chunk, .iron_ore, 10, 4, 5, 64, random); - self.placeOreVeins(chunk, .gold_ore, 3, 3, 2, 32, random); - self.placeOreVeins(chunk, .glowstone, 8, 4, 5, 40, random); - } - - fn placeOreVeins(self: *const OverworldGenerator, chunk: *Chunk, block: BlockType, count: u32, size: u32, min_y: i32, max_y: i32, random: std.Random) void { - _ = self; - for (0..count) |_| { - const cx = random.uintLessThan(u32, CHUNK_SIZE_X); - const cz = random.uintLessThan(u32, CHUNK_SIZE_Z); - const range = max_y - min_y; - if (range <= 0) continue; - const cy = min_y + @as(i32, @intCast(random.uintLessThan(u32, @intCast(range)))); - const vein_size = random.uintLessThan(u32, size) + 2; - var i: u32 = 0; - while (i < vein_size) : (i += 1) { - const ox = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 2; - const oy = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 2; - const oz = @as(i32, @intCast(random.uintLessThan(u32, 4))) - 2; - const tx = @as(i32, @intCast(cx)) + ox; - const ty = cy + oy; - const tz = 
@as(i32, @intCast(cz)) + oz; - if (chunk.getBlockSafe(tx, ty, tz) == .stone) { - if (tx >= 0 and tx < CHUNK_SIZE_X and ty >= 0 and ty < CHUNK_SIZE_Y and tz >= 0 and tz < CHUNK_SIZE_Z) chunk.setBlock(@intCast(tx), @intCast(ty), @intCast(tz), block); - } - } - } - } - - pub fn generateFeatures(self: *const OverworldGenerator, chunk: *Chunk) void { - var prng = std.Random.DefaultPrng.init(self.continentalness_noise.params.seed ^ @as(u64, @bitCast(@as(i64, chunk.chunk_x))) ^ (@as(u64, @bitCast(@as(i64, chunk.chunk_z))) << 32)); - const random = prng.random(); - - // Calculate region info for whole chunk (approx) - const wx_center = chunk.getWorldX() + 8; - const wz_center = chunk.getWorldZ() + 8; - const region = region_pkg.getRegion(self.continentalness_noise.params.seed, wx_center, wz_center); - - // Region-based vegetation multiplier (Transit=25%, Boundary=15%, Destination=themed) - const veg_mult = region_pkg.getVegetationMultiplier(region); - - // Region-based feature suppression - const allow_subbiomes = region_pkg.allowSubBiomes(region); - - var local_z: u32 = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const surface_y = chunk.getSurfaceHeight(local_x, local_z); - if (surface_y <= 0 or surface_y >= CHUNK_SIZE_Y - 1) continue; - - // Use the biome stored in the chunk - const biome = chunk.biomes[local_x + local_z * CHUNK_SIZE_X]; - - // Sample variant noise for sub-biomes - const wx: f32 = @floatFromInt(chunk.getWorldX() + @as(i32, @intCast(local_x))); - const wz: f32 = @floatFromInt(chunk.getWorldZ() + @as(i32, @intCast(local_z))); - const variant_val = self.variant_noise.get2D(wx, wz); - - // Get surface block to check if we can place on it - const surface_block = chunk.getBlock(local_x, @intCast(surface_y), local_z); - - // Try decorations - self.decoration_provider.decorate( - chunk, - local_x, - local_z, - @intCast(surface_y), - surface_block, - biome, - variant_val, - 
allow_subbiomes, - veg_mult, - random, - ); - } - } - } - - pub fn computeSkylight(self: *const OverworldGenerator, chunk: *Chunk) void { - _ = self; - var local_z: u32 = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - var sky_light: u4 = MAX_LIGHT; - var y: i32 = CHUNK_SIZE_Y - 1; - while (y >= 0) : (y -= 1) { - const uy: u32 = @intCast(y); - const block = chunk.getBlock(local_x, uy, local_z); - chunk.setSkyLight(local_x, uy, local_z, sky_light); - if (block_registry.getBlockDefinition(block).isOpaque()) { - sky_light = 0; - } else if (block == .water and sky_light > 0) { - sky_light -= 1; - } - } - } - } - } - - const LightNode = struct { - x: u8, - y: u16, - z: u8, - r: u4, - g: u4, - b: u4, - }; - - pub fn computeBlockLight(self: *const OverworldGenerator, chunk: *Chunk) !void { - var queue = std.ArrayListUnmanaged(LightNode){}; - defer queue.deinit(self.allocator); - var local_z: u32 = 0; - while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { - var y: u32 = 0; - while (y < CHUNK_SIZE_Y) : (y += 1) { - var local_x: u32 = 0; - while (local_x < CHUNK_SIZE_X) : (local_x += 1) { - const block = chunk.getBlock(local_x, y, local_z); - const emission = block_registry.getBlockDefinition(block).light_emission; - if (emission[0] > 0 or emission[1] > 0 or emission[2] > 0) { - chunk.setBlockLightRGB(local_x, y, local_z, emission[0], emission[1], emission[2]); - try queue.append(self.allocator, .{ - .x = @intCast(local_x), - .y = @intCast(y), - .z = @intCast(local_z), - .r = emission[0], - .g = emission[1], - .b = emission[2], - }); - } - } - } - } - var head: usize = 0; - while (head < queue.items.len) : (head += 1) { - const node = queue.items[head]; - const neighbors = [6][3]i32{ .{ 1, 0, 0 }, .{ -1, 0, 0 }, .{ 0, 1, 0 }, .{ 0, -1, 0 }, .{ 0, 0, 1 }, .{ 0, 0, -1 } }; - for (neighbors) |offset| { - const nx = @as(i32, node.x) + offset[0]; - const ny = @as(i32, node.y) + offset[1]; - const nz 
= @as(i32, node.z) + offset[2]; - if (nx >= 0 and nx < CHUNK_SIZE_X and ny >= 0 and ny < CHUNK_SIZE_Y and nz >= 0 and nz < CHUNK_SIZE_Z) { - const ux: u32 = @intCast(nx); - const uy: u32 = @intCast(ny); - const uz: u32 = @intCast(nz); - const block = chunk.getBlock(ux, uy, uz); - if (!block_registry.getBlockDefinition(block).isOpaque()) { - const current_light = chunk.getLight(ux, uy, uz); - const current_r = current_light.getBlockLightR(); - const current_g = current_light.getBlockLightG(); - const current_b = current_light.getBlockLightB(); - - const next_r: u4 = if (node.r > 1) node.r - 1 else 0; - const next_g: u4 = if (node.g > 1) node.g - 1 else 0; - const next_b: u4 = if (node.b > 1) node.b - 1 else 0; - - if (next_r > current_r or next_g > current_g or next_b > current_b) { - const new_r = @max(next_r, current_r); - const new_g = @max(next_g, current_g); - const new_b = @max(next_b, current_b); - chunk.setBlockLightRGB(ux, uy, uz, new_r, new_g, new_b); - try queue.append(self.allocator, .{ - .x = @intCast(nx), - .y = @intCast(ny), - .z = @intCast(nz), - .r = new_r, - .g = new_g, - .b = new_b, - }); - } - } - } - } - } - } - - // ========================================================================= - // Biome Edge Detection (Issue #102) - // ========================================================================= - - /// Sample biome at arbitrary world coordinates (deterministic, no chunk dependency) - /// This is a lightweight version of getColumnInfo for edge detection sampling - pub fn sampleBiomeAtWorld(self: *const OverworldGenerator, wx: i32, wz: i32) BiomeId { - const p = self.params; - const wxf: f32 = @floatFromInt(wx); - const wzf: f32 = @floatFromInt(wz); - - // Compute warped coordinates - const warp = self.computeWarp(wxf, wzf, 0); // sampleBiome always uses full detail - const xw = wxf + warp.x; - const zw = wzf + warp.z; - - // Get structural parameters - const c = self.getContinentalness(xw, zw, 0); - const e = self.getErosion(xw, zw, 
0); - const pv = self.getPeaksValleys(xw, zw, 0); - const coast_jitter = self.coast_jitter_noise.get2DOctaves(xw, zw, 2); - const c_jittered = clamp01(c + coast_jitter); - const river_mask = self.getRiverMask(xw, zw, 0); - - // Get region for height calculation - const region = region_pkg.getRegion(self.continentalness_noise.params.seed, wx, wz); - - // Compute height for climate calculation - const terrain_height = self.computeHeight(c_jittered, e, pv, xw, zw, river_mask, region, 0); - const terrain_height_i: i32 = @intFromFloat(terrain_height); - const sea: f32 = @floatFromInt(p.sea_level); - - // Get climate parameters - const altitude_offset: f32 = @max(0, terrain_height - sea); - var temperature = self.getTemperature(xw, zw, 0); - temperature = clamp01(temperature - (altitude_offset / 512.0) * p.temp_lapse); - const humidity = self.getHumidity(xw, zw, 0); - - // Build climate params - const climate = biome_mod.computeClimateParams( - temperature, - humidity, - terrain_height_i, - c_jittered, - e, - p.sea_level, - CHUNK_SIZE_Y, - ); - - // Structural params (simplified - no slope calculation for sampling) - const ridge_mask = self.getRidgeFactor(xw, zw, c_jittered, 0); - const structural = biome_mod.StructuralParams{ - .height = terrain_height_i, - .slope = 1, // Assume low slope for sampling - .continentalness = c_jittered, - .ridge_mask = ridge_mask, - }; - - return biome_mod.selectBiomeWithConstraintsAndRiver(climate, structural, river_mask); - } - - /// Detect if a position is near a biome boundary that needs a transition zone - /// Returns edge info including the neighboring biome and proximity band - pub fn detectBiomeEdge( - self: *const OverworldGenerator, - wx: i32, - wz: i32, - center_biome: BiomeId, - ) biome_mod.BiomeEdgeInfo { - var detected_neighbor: ?BiomeId = null; - var closest_band: biome_mod.EdgeBand = .none; - - // Check at each radius (4, 8, 12 blocks) - from closest to farthest - for (biome_mod.EDGE_CHECK_RADII, 0..) 
|radius, band_idx| { - const r: i32 = @intCast(radius); - const offsets = [_][2]i32{ - .{ r, 0 }, // East - .{ -r, 0 }, // West - .{ 0, r }, // South - .{ 0, -r }, // North - }; - - for (offsets) |off| { - const neighbor_biome = self.sampleBiomeAtWorld(wx + off[0], wz + off[1]); - - // Check if this neighbor differs and needs a transition - if (neighbor_biome != center_biome and biome_mod.needsTransition(center_biome, neighbor_biome)) { - detected_neighbor = neighbor_biome; - // Band index: 0=4 blocks (inner), 1=8 blocks (middle), 2=12 blocks (outer) - // EdgeBand: inner=3, middle=2, outer=1 - closest_band = @enumFromInt(3 - @as(u2, @intCast(band_idx))); - break; - } - } - - // If we found an edge at this radius, stop checking farther radii - if (detected_neighbor != null) break; - } - - return .{ - .base_biome = center_biome, - .neighbor_biome = detected_neighbor, - .edge_band = closest_band, - }; - } - - // ========================================================================= - // Classification Cache Population (Issue #119 Phase 2) - // ========================================================================= - - /// Populate classification cache with authoritative biome/surface/water data. - /// Called during LOD0 generation so LOD1-3 can sample consistent values. 
fn populateClassificationCache( self: *OverworldGenerator, world_x: i32, @@ -1698,9 +300,9 @@ pub const OverworldGenerator = struct { is_ocean_water_flags: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool, coastal_types: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]CoastalSurfaceType, ) void { - const p = self.params; + const sea_level = self.terrain_shape.getSeaLevel(); + const region_seed = self.terrain_shape.getRegionSeed(); - // Populate cache for each block in this chunk var local_z: u32 = 0; while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { var local_x: u32 = 0; @@ -1708,8 +310,6 @@ pub const OverworldGenerator = struct { const idx = local_x + local_z * CHUNK_SIZE_X; const wx = world_x + @as(i32, @intCast(local_x)); const wz = world_z + @as(i32, @intCast(local_z)); - - // Skip if already cached (shouldn't happen often, but be safe) if (self.classification_cache.has(wx, wz)) continue; const biome_id = biome_ids[idx]; @@ -1718,26 +318,22 @@ pub const OverworldGenerator = struct { const is_ocean = is_ocean_water_flags[idx]; const coastal_type = coastal_types[idx]; - // Derive surface type from biome and coastal classification const surface_type = self.deriveSurfaceTypeInternal( biome_id, height, + sea_level, is_ocean, coastal_type, ); - // Get continental zone - const continental_zone = self.getContinentalZone(continentalness); + const continental_zone = self.terrain_shape.getContinentalZone(continentalness); + const region_info = region_pkg.getRegion(region_seed, wx, wz); + const path_info = region_pkg.getPathInfo(region_seed, wx, wz, region_info); - // Get region info for role - const region_info = region_pkg.getRegion(self.continentalness_noise.params.seed, wx, wz); - const path_info = region_pkg.getPathInfo(self.continentalness_noise.params.seed, wx, wz, region_info); - - // Store in cache self.classification_cache.put(wx, wz, .{ .biome_id = biome_id, .surface_type = surface_type, - .is_water = height < p.sea_level, + .is_water = height < sea_level, .continental_zone = 
continental_zone, .region_role = region_info.role, .path_type = path_info.path_type, @@ -1746,21 +342,17 @@ pub const OverworldGenerator = struct { } } - /// Derive surface type from biome and terrain parameters (internal helper) fn deriveSurfaceTypeInternal( - self: *const OverworldGenerator, + _: *const OverworldGenerator, biome_id: BiomeId, height: i32, + sea_level: i32, is_ocean: bool, coastal_type: CoastalSurfaceType, ) SurfaceType { - const p = self.params; - - // Water cases - if (is_ocean and height < p.sea_level - 30) return .water_deep; - if (is_ocean and height < p.sea_level) return .water_shallow; + if (is_ocean and height < sea_level - 30) return .water_deep; + if (is_ocean and height < sea_level) return .water_shallow; - // Coastal overrides switch (coastal_type) { .sand_beach => return .sand, .gravel_beach => return .rock, @@ -1768,7 +360,6 @@ pub const OverworldGenerator = struct { .none => {}, } - // Biome-based surface return switch (biome_id) { .desert, .badlands, .beach => .sand, .snow_tundra, .snowy_mountains => .snow, diff --git a/src/world/worldgen/terrain_shape_generator.zig b/src/world/worldgen/terrain_shape_generator.zig new file mode 100644 index 00000000..3f4e036a --- /dev/null +++ b/src/world/worldgen/terrain_shape_generator.zig @@ -0,0 +1,445 @@ +const std = @import("std"); +const noise_mod = @import("noise.zig"); +const clamp01 = noise_mod.clamp01; +const CaveSystem = @import("caves.zig").CaveSystem; +pub const CaveCarveMap = @import("caves.zig").CaveCarveMap; +const biome_mod = @import("biome.zig"); +const BiomeId = biome_mod.BiomeId; +const BiomeSource = biome_mod.BiomeSource; +const region_pkg = @import("region.zig"); +const RegionInfo = region_pkg.RegionInfo; +const world_class = @import("world_class.zig"); +const ContinentalZone = world_class.ContinentalZone; +const Chunk = @import("../chunk.zig").Chunk; +const CHUNK_SIZE_X = @import("../chunk.zig").CHUNK_SIZE_X; +const CHUNK_SIZE_Y = @import("../chunk.zig").CHUNK_SIZE_Y; +const 
CHUNK_SIZE_Z = @import("../chunk.zig").CHUNK_SIZE_Z; +const Biome = @import("../block.zig").Biome; +const noise_sampler_mod = @import("noise_sampler.zig"); +pub const NoiseSampler = noise_sampler_mod.NoiseSampler; +const height_sampler_mod = @import("height_sampler.zig"); +pub const HeightSampler = height_sampler_mod.HeightSampler; +const surface_builder_mod = @import("surface_builder.zig"); +pub const SurfaceBuilder = surface_builder_mod.SurfaceBuilder; +pub const CoastalSurfaceType = surface_builder_mod.CoastalSurfaceType; +const CoastalGenerator = @import("coastal_generator.zig").CoastalGenerator; + +pub const Params = struct { + temp_lapse: f32 = 0.25, + sea_level: i32 = 64, + ocean_threshold: f32 = 0.35, + ridge_inland_min: f32 = 0.50, + ridge_inland_max: f32 = 0.70, + ridge_sparsity: f32 = 0.50, +}; + +pub const ColumnData = struct { + terrain_height: f32, + terrain_height_i: i32, + continentalness: f32, + erosion: f32, + river_mask: f32, + temperature: f32, + humidity: f32, + ridge_mask: f32, + is_underwater: bool, + is_ocean: bool, + cave_region: f32, +}; + +pub const ChunkPhaseData = struct { + surface_heights: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, + biome_ids: [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId, + secondary_biome_ids: [CHUNK_SIZE_X * CHUNK_SIZE_Z]BiomeId, + biome_blends: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + filler_depths: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, + is_underwater_flags: [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool, + is_ocean_water_flags: [CHUNK_SIZE_X * CHUNK_SIZE_Z]bool, + cave_region_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + continentalness_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + erosion_values: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + ridge_masks: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + river_masks: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + temperatures: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + humidities: [CHUNK_SIZE_X * CHUNK_SIZE_Z]f32, + slopes: [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, + coastal_types: [CHUNK_SIZE_X * CHUNK_SIZE_Z]CoastalSurfaceType, +}; + +pub const 
TerrainShapeGenerator = struct { + noise_sampler: NoiseSampler, + height_sampler: HeightSampler, + surface_builder: SurfaceBuilder, + biome_source: BiomeSource, + cave_system: CaveSystem, + coastal_generator: CoastalGenerator, + params: Params, + + pub fn init(seed: u64) TerrainShapeGenerator { + return initWithParams(seed, .{}); + } + + pub fn initWithParams(seed: u64, params: Params) TerrainShapeGenerator { + const p = params; + return .{ + .noise_sampler = NoiseSampler.init(seed), + .height_sampler = HeightSampler.init(), + .surface_builder = SurfaceBuilder.init(), + .biome_source = BiomeSource.init(), + .cave_system = CaveSystem.init(seed), + .coastal_generator = CoastalGenerator.init(p.ocean_threshold), + .params = p, + }; + } + + pub fn getSeed(self: *const TerrainShapeGenerator) u64 { + return self.noise_sampler.getSeed(); + } + + pub fn getRegionSeed(self: *const TerrainShapeGenerator) u64 { + return self.noise_sampler.continentalness_noise.params.seed; + } + + pub fn getSeaLevel(self: *const TerrainShapeGenerator) i32 { + return self.params.sea_level; + } + + pub fn getOceanThreshold(self: *const TerrainShapeGenerator) f32 { + return self.params.ocean_threshold; + } + + pub fn getContinentalZone(self: *const TerrainShapeGenerator, c: f32) ContinentalZone { + return self.height_sampler.getContinentalZone(c); + } + + pub fn getNoiseSampler(self: *const TerrainShapeGenerator) *const NoiseSampler { + return &self.noise_sampler; + } + + pub fn getHeightSampler(self: *const TerrainShapeGenerator) *const HeightSampler { + return &self.height_sampler; + } + + pub fn getSurfaceBuilder(self: *const TerrainShapeGenerator) *const SurfaceBuilder { + return &self.surface_builder; + } + + pub fn getBiomeSource(self: *const TerrainShapeGenerator) *const BiomeSource { + return &self.biome_source; + } + + pub fn sampleColumnData(self: *const TerrainShapeGenerator, wx: f32, wz: f32, reduction: u8) ColumnData { + const sea: f32 = @floatFromInt(self.params.sea_level); + var 
noise = self.noise_sampler.sampleColumn(wx, wz, reduction); + const cj_octaves: u16 = if (2 > reduction) 2 - @as(u16, reduction) else 1; + const coast_jitter = self.noise_sampler.coast_jitter_noise.get2DOctaves(noise.warped_x, noise.warped_z, cj_octaves); + const c_jittered = CoastalGenerator.applyCoastJitter(noise.continentalness, coast_jitter); + noise.continentalness = c_jittered; + noise.river_mask = self.noise_sampler.getRiverMask(noise.warped_x, noise.warped_z, reduction); + + const region_seed = self.getRegionSeed(); + const wx_i: i32 = @intFromFloat(wx); + const wz_i: i32 = @intFromFloat(wz); + const region = region_pkg.getRegion(region_seed, wx_i, wz_i); + const path_info = region_pkg.getPathInfo(region_seed, wx_i, wz_i, region); + const terrain_height = self.height_sampler.computeHeight(&self.noise_sampler, noise, region, path_info, reduction); + const terrain_height_i: i32 = @intFromFloat(terrain_height); + + const altitude_offset: f32 = @max(0, terrain_height - sea); + var temperature = noise.temperature; + temperature = clamp01(temperature - (altitude_offset / 512.0) * self.params.temp_lapse); + + const ridge_params = NoiseSampler.RidgeParams{ + .inland_min = self.params.ridge_inland_min, + .inland_max = self.params.ridge_inland_max, + .sparsity = self.params.ridge_sparsity, + }; + const ridge_mask = self.noise_sampler.getRidgeFactor(noise.warped_x, noise.warped_z, c_jittered, reduction, ridge_params); + + return .{ + .terrain_height = terrain_height, + .terrain_height_i = terrain_height_i, + .continentalness = c_jittered, + .erosion = noise.erosion, + .river_mask = noise.river_mask, + .temperature = temperature, + .humidity = noise.humidity, + .ridge_mask = ridge_mask, + .is_underwater = terrain_height < sea, + .is_ocean = c_jittered < self.params.ocean_threshold, + .cave_region = self.cave_system.getCaveRegionValue(wx, wz), + }; + } + + pub fn prepareChunkPhaseData( + self: *const TerrainShapeGenerator, + phase_data: *ChunkPhaseData, + world_x: i32, 
+ world_z: i32, + cache_center_x: i32, + cache_center_z: i32, + stop_flag: ?*const bool, + ) bool { + var local_z: u32 = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const idx = local_x + local_z * CHUNK_SIZE_X; + const wx: f32 = @floatFromInt(world_x + @as(i32, @intCast(local_x))); + const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); + const column = self.sampleColumnData(wx, wz, 0); + + phase_data.surface_heights[idx] = column.terrain_height_i; + phase_data.is_underwater_flags[idx] = column.is_underwater; + phase_data.is_ocean_water_flags[idx] = column.is_ocean; + phase_data.cave_region_values[idx] = column.cave_region; + phase_data.temperatures[idx] = column.temperature; + phase_data.humidities[idx] = column.humidity; + phase_data.continentalness_values[idx] = column.continentalness; + phase_data.erosion_values[idx] = column.erosion; + phase_data.ridge_masks[idx] = column.ridge_mask; + phase_data.river_masks[idx] = column.river_mask; + } + } + + local_z = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const idx = local_x + local_z * CHUNK_SIZE_X; + const terrain_h = phase_data.surface_heights[idx]; + var max_slope: i32 = 0; + if (local_x > 0) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - phase_data.surface_heights[idx - 1])))); + if (local_x < CHUNK_SIZE_X - 1) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - phase_data.surface_heights[idx + 1])))); + if (local_z > 0) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - phase_data.surface_heights[idx - CHUNK_SIZE_X])))); + if (local_z < CHUNK_SIZE_Z - 1) max_slope = @max(max_slope, @as(i32, @intCast(@abs(terrain_h - phase_data.surface_heights[idx + CHUNK_SIZE_X])))); + phase_data.slopes[idx] 
= max_slope; + } + } + + local_z = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const idx = local_x + local_z * CHUNK_SIZE_X; + const climate = self.biome_source.computeClimate( + phase_data.temperatures[idx], + phase_data.humidities[idx], + phase_data.surface_heights[idx], + phase_data.continentalness_values[idx], + phase_data.erosion_values[idx], + CHUNK_SIZE_Y, + ); + + const structural = biome_mod.StructuralParams{ + .height = phase_data.surface_heights[idx], + .slope = phase_data.slopes[idx], + .continentalness = phase_data.continentalness_values[idx], + .ridge_mask = phase_data.ridge_masks[idx], + }; + + const biome_id = self.biome_source.selectBiome(climate, structural, phase_data.river_masks[idx]); + phase_data.biome_ids[idx] = biome_id; + phase_data.secondary_biome_ids[idx] = biome_id; + phase_data.biome_blends[idx] = 0.0; + } + } + + const EDGE_GRID_SIZE = CHUNK_SIZE_X / biome_mod.EDGE_STEP; + const player_dist_sq = (world_x - cache_center_x) * (world_x - cache_center_x) + + (world_z - cache_center_z) * (world_z - cache_center_z); + + if (player_dist_sq < 256 * 256) { + var gz: u32 = 0; + while (gz < EDGE_GRID_SIZE) : (gz += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var gx: u32 = 0; + while (gx < EDGE_GRID_SIZE) : (gx += 1) { + const sample_x = gx * biome_mod.EDGE_STEP + biome_mod.EDGE_STEP / 2; + const sample_z = gz * biome_mod.EDGE_STEP + biome_mod.EDGE_STEP / 2; + const sample_idx = sample_x + sample_z * CHUNK_SIZE_X; + const base_biome = phase_data.biome_ids[sample_idx]; + const sample_wx = world_x + @as(i32, @intCast(sample_x)); + const sample_wz = world_z + @as(i32, @intCast(sample_z)); + const edge_info = self.detectBiomeEdge(sample_wx, sample_wz, base_biome); + + if (edge_info.edge_band != .none) { + if (edge_info.neighbor_biome) |neighbor| { + if (biome_mod.getTransitionBiome(base_biome, neighbor)) 
|transition_biome| { + var cell_z: u32 = 0; + while (cell_z < biome_mod.EDGE_STEP) : (cell_z += 1) { + var cell_x: u32 = 0; + while (cell_x < biome_mod.EDGE_STEP) : (cell_x += 1) { + const lx = gx * biome_mod.EDGE_STEP + cell_x; + const lz = gz * biome_mod.EDGE_STEP + cell_z; + if (lx < CHUNK_SIZE_X and lz < CHUNK_SIZE_Z) { + const cell_idx = lx + lz * CHUNK_SIZE_X; + phase_data.secondary_biome_ids[cell_idx] = phase_data.biome_ids[cell_idx]; + phase_data.biome_ids[cell_idx] = transition_biome; + phase_data.biome_blends[cell_idx] = switch (edge_info.edge_band) { + .inner => 0.3, + .middle => 0.2, + .outer => 0.1, + .none => 0.0, + }; + } + } + } + } + } + } + } + } + } + + local_z = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const idx = local_x + local_z * CHUNK_SIZE_X; + const biome_def = biome_mod.getBiomeDefinition(phase_data.biome_ids[idx]); + phase_data.filler_depths[idx] = biome_def.surface.depth_range; + phase_data.coastal_types[idx] = CoastalGenerator.getSurfaceType( + &self.surface_builder, + phase_data.continentalness_values[idx], + phase_data.slopes[idx], + phase_data.surface_heights[idx], + phase_data.erosion_values[idx], + ); + } + } + + return true; + } + + pub fn fillChunkBlocks( + self: *const TerrainShapeGenerator, + chunk: *Chunk, + phase_data: *const ChunkPhaseData, + worm_carve_map: ?*const CaveCarveMap, + stop_flag: ?*const bool, + ) bool { + const sea_level = self.params.sea_level; + const world_x = chunk.getWorldX(); + const world_z = chunk.getWorldZ(); + var local_z: u32 = 0; + while (local_z < CHUNK_SIZE_Z) : (local_z += 1) { + if (stop_flag) |sf| if (sf.*) return false; + var local_x: u32 = 0; + while (local_x < CHUNK_SIZE_X) : (local_x += 1) { + const idx = local_x + local_z * CHUNK_SIZE_X; + const terrain_height_i = phase_data.surface_heights[idx]; + const wx: f32 = @floatFromInt(world_x + @as(i32, 
@intCast(local_x))); + const wz: f32 = @floatFromInt(world_z + @as(i32, @intCast(local_z))); + const dither = self.noise_sampler.detail_noise.noise.perlin2D(wx * 0.02, wz * 0.02) * 0.5 + 0.5; + const use_secondary = dither < phase_data.biome_blends[idx]; + const active_biome_id = if (use_secondary) phase_data.secondary_biome_ids[idx] else phase_data.biome_ids[idx]; + const active_biome: Biome = @enumFromInt(@intFromEnum(active_biome_id)); + + chunk.setSurfaceHeight(local_x, local_z, @intCast(terrain_height_i)); + chunk.biomes[idx] = active_biome_id; + + var y: i32 = 0; + while (y < CHUNK_SIZE_Y) : (y += 1) { + var block = self.surface_builder.getSurfaceBlock( + y, + terrain_height_i, + active_biome, + phase_data.filler_depths[idx], + phase_data.is_ocean_water_flags[idx], + phase_data.is_underwater_flags[idx], + phase_data.coastal_types[idx], + ); + + if (block != .air and block != .water and block != .bedrock) { + const wy: f32 = @floatFromInt(y); + const should_carve_worm = if (worm_carve_map) |map| map.get(local_x, @intCast(y), local_z) else false; + const should_carve_cavity = self.cave_system.shouldCarve(wx, wy, wz, terrain_height_i, phase_data.cave_region_values[idx]); + if (should_carve_worm or should_carve_cavity) { + block = if (y < sea_level) .water else .air; + } + } + chunk.setBlock(local_x, @intCast(y), local_z, block); + } + } + } + + return true; + } + + pub fn generateWormCaves( + self: *const TerrainShapeGenerator, + chunk: *Chunk, + surface_heights: *const [CHUNK_SIZE_X * CHUNK_SIZE_Z]i32, + allocator: std.mem.Allocator, + ) !CaveCarveMap { + return self.cave_system.generateWormCaves(chunk, surface_heights, allocator); + } + + pub fn sampleBiomeAtWorld(self: *const TerrainShapeGenerator, wx: i32, wz: i32) BiomeId { + const wxf: f32 = @floatFromInt(wx); + const wzf: f32 = @floatFromInt(wz); + const column = self.sampleColumnData(wxf, wzf, 0); + const climate = self.biome_source.computeClimate( + column.temperature, + column.humidity, + 
column.terrain_height_i, + column.continentalness, + column.erosion, + CHUNK_SIZE_Y, + ); + const structural = biome_mod.StructuralParams{ + .height = column.terrain_height_i, + .slope = 1, + .continentalness = column.continentalness, + .ridge_mask = column.ridge_mask, + }; + return self.biome_source.selectBiome(climate, structural, column.river_mask); + } + + pub fn detectBiomeEdge( + self: *const TerrainShapeGenerator, + wx: i32, + wz: i32, + center_biome: BiomeId, + ) biome_mod.BiomeEdgeInfo { + var detected_neighbor: ?BiomeId = null; + var closest_band: biome_mod.EdgeBand = .none; + + for (biome_mod.EDGE_CHECK_RADII, 0..) |radius, band_idx| { + const r: i32 = @intCast(radius); + const offsets = [_][2]i32{ .{ r, 0 }, .{ -r, 0 }, .{ 0, r }, .{ 0, -r } }; + for (offsets) |off| { + const neighbor_biome = self.sampleBiomeAtWorld(wx + off[0], wz + off[1]); + if (neighbor_biome != center_biome and biome_mod.needsTransition(center_biome, neighbor_biome)) { + detected_neighbor = neighbor_biome; + closest_band = @enumFromInt(3 - @as(u2, @intCast(band_idx))); + break; + } + } + if (detected_neighbor != null) break; + } + + return .{ + .base_biome = center_biome, + .neighbor_biome = detected_neighbor, + .edge_band = closest_band, + }; + } + + pub fn getRegionInfo(self: *const TerrainShapeGenerator, world_x: i32, world_z: i32) RegionInfo { + return region_pkg.getRegion(self.getRegionSeed(), world_x, world_z); + } + + pub fn isOceanWater(self: *const TerrainShapeGenerator, wx: f32, wz: f32) bool { + return self.coastal_generator.isOceanWater(&self.noise_sampler, wx, wz); + } + + pub fn isInlandWater(self: *const TerrainShapeGenerator, wx: f32, wz: f32, height: i32) bool { + return self.coastal_generator.isInlandWater(&self.noise_sampler, wx, wz, height, self.params.sea_level); + } +};