diff --git a/.changeset/pointcloud-class-isolation.md b/.changeset/pointcloud-class-isolation.md new file mode 100644 index 000000000..18142164e --- /dev/null +++ b/.changeset/pointcloud-class-isolation.md @@ -0,0 +1,32 @@ +--- +"@ifc-lite/renderer": minor +"@ifc-lite/viewer": minor +--- + +Per-class visibility toggles for ASPRS-classified point clouds. + +A new "Classes" section in the point cloud panel exposes a checkbox +list of every LAS 1.4 standard class (Ground, Vegetation, Building, +Water, Wires, Bridge deck, ...). Toggling a class hides every point +with that classification. Works in any colour mode; the swatch +colours mirror the splat shader's classification palette so the UI +matches what's on screen. + +Implementation: +- New `pointCloudClassMask: number` (u32 bitmask, default + `0xFFFFFFFF`) on the point cloud slice. `togglePointCloudClass(id)` + flips a single bit; `setPointCloudClassMask(mask)` replaces all 32. +- `PointCloudRenderOptions.classMask` plumbed through the renderer. + Stored in uniform slot `flags.w` (was unused). +- Splat shader checks `(flags.w >> classId) & 1` per vertex; hidden + classes get a degenerate `clipPos = vec4(0, 0, -2, 1)` so they're + culled before rasterisation rather than wasted on a fragment-stage + discard. +- New `PointCloudClasses` component in the panel renders a + `
` collapsible with "Show all" + per-class toggles. A + badge surfaces "N of 32 visible" when not all are on. +- `usePointCloudSync` forwards the mask to + `setPointCloudOptions({ classMask })`. + +Class ids ≥32 always show — the mask only covers the standard +range. Custom-labelled scans need a richer UI (deferred). diff --git a/.changeset/pointcloud-deviation-heatmap.md b/.changeset/pointcloud-deviation-heatmap.md new file mode 100644 index 000000000..d3db772c1 --- /dev/null +++ b/.changeset/pointcloud-deviation-heatmap.md @@ -0,0 +1,72 @@ +--- +"@ifc-lite/renderer": minor +"@ifc-lite/viewer": minor +--- + +BIM ↔ scan deviation heatmap — GPU compute pipeline that colours each +scan point by signed distance to the nearest mesh surface. Works with +every IFC ingest path (STEP / IFCx / GLB / federated) and with every +point cloud format (inline IFCx + streamed LAS / LAZ / PLY / PCD / E57 +/ PTS / XYZ — anywhere `Scene.forEachMeshData` reaches and any node +the splat pipeline already renders). + +Pipeline: +1. **Per-triangle BVH** built from `Scene.forEachMeshData()` — + reaches every CPU-side `MeshData` regardless of source. Median + split along longest axis, max 16 tris per leaf, flattened to a + `Float32Array` of 32-byte nodes during the build (no second + pass). +2. **Two GPU storage buffers** — nodes + triangles — uploaded once + per mesh-set change. Cached by a `(meshCount, totalPositions)` + fingerprint so re-running deviation against the same model is a + pure dispatch. +3. **Compute shader** with stack-based BVH descent (workgroup-size + 64). Per point: descend BVH pruning by squared point-to-AABB + distance, run Ericson §5.1.5 closest-point-on-triangle on every + leaf candidate, output signed distance via the closest face's + precomputed normal. +4. **Per-chunk deviation buffer** allocated alongside the splat + vertex buffer (`STORAGE | VERTEX | COPY_DST`, 4 bytes per point, + zero-initialised). 
Compute reads the vertex buffer's positions + directly — no CPU copy of streamed clouds needed. +5. **Splat shader** gains a 2nd vertex buffer (location 4 = `f32` + deviation), a new `deviation` color mode, and a diverging + blue → white → red `deviation_ramp`. Uniform block grows by 16 + bytes (new `deviationRange: vec4` slot for centre + half- + range), `POINT_UNIFORM_SIZE` 208 → 224. +6. **Public API** — `Renderer.computeDeviations({ maxRange?, + forceRebuild? })` returns `{ bvhTriangles, bvhNodes, + chunksProcessed, pointsProcessed, bounds, suggestedHalfRange }`. + Awaits `queue.onSubmittedWorkDone` so callers see populated + buffers when the promise resolves. +7. **UI** — new `DeviationPanel` inside `PointCloudPanel`. Compute + button (gated on `triangleCount > 0`), live progress + duration + readout, range slider in millimetres (1 mm to 1 m), inline + blue-white-red legend. Auto-suggests a half-range from the BVH + bbox (±max-extent / 1000) and auto-switches the colour mode to + `deviation` on success. +8. **Slice** — `pointCloudColorMode` gains `'deviation'`, plus + `pointCloudDeviationCenterOffset`, `pointCloudDeviationHalfRange` + (default ±5 cm), and `pointCloudDeviationComputed`. Sync hook + forwards the range to the renderer uniform. + +Sign convention: positive = scan point is on the outward-normal +side of the closest triangle (typical "scan overshoots wall by +5 mm"). Negative = inside / behind. Non-watertight BIM (typical +IFC) means "inside the building" isn't globally defined, but +per-surface front/back is always meaningful. + +Limitations / future work: +- The dispatch processes every uploaded point against every + triangle in the scene; isolated / hidden meshes still contribute + to the BVH. A `meshFilter` predicate is a natural follow-up. +- Histogram + auto-range from p5/p95 not yet implemented — the + default half-range suggestion is a coarse bbox/1000 heuristic. + Phase B will add a 2nd compute pass with atomic histogram. 
+- The BVH walk uses a 64-deep per-thread stack. Pathologically + unbalanced trees (>64 deep) silently drop the deepest branch. + Real BIMs don't get there; SAH or surface-area cost would help + if we ever hit it. + +Verified: full repo typecheck (24/24), 655 viewer tests, viewer +Vite build green. diff --git a/.changeset/pointcloud-e57-multi-scan-pose.md b/.changeset/pointcloud-e57-multi-scan-pose.md new file mode 100644 index 000000000..e583c008d --- /dev/null +++ b/.changeset/pointcloud-e57-multi-scan-pose.md @@ -0,0 +1,34 @@ +--- +"@ifc-lite/pointcloud": minor +--- + +E57 multi-scan pose merging — registered files now load. + +Previously a multi-scan E57 with `` elements threw a clear +"re-export as merged" error. This change parses each Data3D's pose +(unit quaternion + translation) and applies it before merging, so +registered scans line up in the file's global frame. + +Implementation: +- `Data3DEntry.hasPose: boolean` → `Data3DEntry.pose?: E57Pose` + carrying `{ rotation: {w,x,y,z}, translation: {x,y,z} }`. +- New `parsePoseElement` walks the `` + structure; non-finite values fall through to identity rather than + rejecting the whole file. +- New exported `applyPoseInPlace(positions, count, pose)` derives the + 3×3 rotation matrix from the quaternion (Hamilton convention, + `w + xi + yj + zk`) and computes `out = R · in + T` per point. +- `decodeE57` applies the pose after `decodeE57Scan` returns and + recomputes bbox; identity / absent poses are no-ops. +- The "Multi-scan pose merging is not yet supported" rejection is + removed. + +3 new tests: +- Pose extraction from XML (90°-around-Z quaternion + finite + translation, plus a no-pose sibling). +- `applyPoseInPlace` with a 90°-around-Z + translation, asserting + per-axis transforms. +- Identity pose round-trips positions unchanged. + +Verified: 64 pointcloud unit tests pass, full repo typecheck (24/24), +viewer Vite build green. 
diff --git a/.changeset/pointcloud-e57-scaled-integer.md b/.changeset/pointcloud-e57-scaled-integer.md new file mode 100644 index 000000000..7571e5d37 --- /dev/null +++ b/.changeset/pointcloud-e57-scaled-integer.md @@ -0,0 +1,44 @@ +--- +"@ifc-lite/pointcloud": minor +"@ifc-lite/renderer": patch +--- + +E57 ScaledInteger codec — bit-packed cartesian / intensity / colour. + +ScaledInteger is the more compact encoding most real-world Faro, +Trimble, and Leica E57 exports use; previously we threw a clear +error on these files. This change implements the decoder so they +load directly. + +Per spec ASTM E2807-11 §6.3.4: +- `bitsPerRecord = ceil(log2(maximum - minimum + 1))` +- Bytestream stores `raw_int = original − minimum` packed LSB-first + within each byte; decoded float = `(raw_int + minimum) * scale + offset` + +Implementation: +- New `readBitsLE(bytes, bitOffset, bitsPerRecord)` walks a byte + buffer and reconstructs each value into a JS number using + `Math.pow(2, n)` instead of `<< n`, so precision holds up to 53 + bits (covers every real exporter — LiDAR + survey kit tops out + around 32 bits). Wider fields throw a clear error. +- `readCartesianStream` and `readIntensityStream` now branch on + field kind: Float / Integer paths unchanged, ScaledInteger path + bit-walks per record. +- `writeColorChannel` extended with a ScaledInteger branch that + remaps `raw → [0, 1]` via the declared min/max range. +- Per-axis packet capacity computation now varies by field kind + (Float = `length / byteSize`, ScaledInteger = `length * 8 / bitsPerRecord`) + via `floatOrSiPointCapacity`. + +The "ScaledInteger throws clearly" error is removed for cartesian, +intensity, and colour — all three now decode. The earlier multi-scan +pose rejection stays in place; that's a separate piece of work. + +2 new tests: +- 8-bit ScaledInteger across all three cartesian axes (round-trip + through known raw values). 
+- 12-bit ScaledInteger that crosses byte boundaries (proves the + bit-pack walk is correct for non-multiples-of-8). + +Verified: 63 pointcloud unit tests pass, full repo typecheck (24/24), +viewer Vite build green. diff --git a/.changeset/pointcloud-laz-wasm-mime.md b/.changeset/pointcloud-laz-wasm-mime.md new file mode 100644 index 000000000..ae8546017 --- /dev/null +++ b/.changeset/pointcloud-laz-wasm-mime.md @@ -0,0 +1,21 @@ +--- +"@ifc-lite/pointcloud": patch +"@ifc-lite/viewer": patch +--- + +Fix LAZ load failing with `WebAssembly: Response has unsupported MIME +type 'text/plain'` on real-world files (e.g. autzen-classified.laz). + +`laz-perf`'s emscripten shim resolves the wasm via `locateFile()` and +calls `fetch("laz-perf.wasm")` relative to its own script directory. +In a Vite-bundled module worker that path becomes `/assets//…` +or just `/laz-perf.wasm` — both 404, and the SPA fallback returns +`index.html` as `text/plain`, which `instantiateStreaming` rightly +rejects. The async fallback then 404s the same way and aborts. + +`loadLazPerf` now resolves the wasm asset URL through Vite's +`?url` import (`laz-perf/lib/web/laz-perf.wasm?url`), pre-fetches the +bytes itself, and hands them to emscripten as `Module.wasmBinary` so +the shim's own fetch is bypassed entirely. Failure modes (asset +resolution, fetch HTTP error) now produce a precise error message +naming the URL and status instead of the opaque emscripten "Aborted". diff --git a/.changeset/pointcloud-near-term-correctness.md b/.changeset/pointcloud-near-term-correctness.md new file mode 100644 index 000000000..187ddb27b --- /dev/null +++ b/.changeset/pointcloud-near-term-correctness.md @@ -0,0 +1,40 @@ +--- +"@ifc-lite/pointcloud": patch +"@ifc-lite/renderer": patch +"@ifc-lite/viewer": patch +--- + +Near-term batch — correctness + robustness items from #611. 
+ +**`computeBBox` empty / non-finite guards.** Both `e57.ts` and +`ifcx-points.ts` now return `{0,0,0}/{0,0,0}` for empty arrays and +skip non-finite triplets. Previously a zero-point or NaN-poisoned +chunk produced ±Infinity bounds that broke camera fit-to-view and +section-plane sliders. + +**Magic-byte-first format detection.** `detectPointCloudFormat` now +probes the buffer (E57 magic, LASF magic, "ply" / "#" / ".PCD" +ASCII tokens) before falling back to extension. A LAS file +mistakenly named `*.ply` no longer goes down the wrong decoder. LAS +vs LAZ still uses the extension to disambiguate (they share the +LASF magic). + +**E57 packet-bounds + per-stream guards.** Validate that the +DataPacket header, bytestream-length table, and each individual +bytestream stay inside `payloadEnd = packetEnd - 4` before reading. +Corrupt files now fail with a precise "bytestream X runs past +packet payload" error instead of silently reading into the next +packet. + +**`e57.ts` split (631 → 4 files).** `e57-page.ts` (header / page CRC +/ section-header resolver), `e57-xml.ts` (prototype + Data3D +parser), `e57-decode.ts` (per-scan binary decoder), `e57.ts` +(orchestrator + re-exports). All four under the AGENTS ~400-line +guideline. + +**`point-cloud-renderer.ts` extract.** Pulled the uniform-block +writer into `point-cloud-uniforms.ts` (`writePointCloudUniforms` + +mode index maps). Renderer drops below 400 lines. + +Verified: 62 pointcloud unit tests pass, full repo typecheck +(24/24). diff --git a/.changeset/pointcloud-near-term-ux.md b/.changeset/pointcloud-near-term-ux.md new file mode 100644 index 000000000..d24d77d64 --- /dev/null +++ b/.changeset/pointcloud-near-term-ux.md @@ -0,0 +1,36 @@ +--- +"@ifc-lite/renderer": minor +"@ifc-lite/viewer": minor +--- + +Near-term UX features from #611. + +**Hover XYZ readback.** GPU pick now also samples the depth texel at +the click position and unprojects it through the inverse view- +projection. 
`PickResult` carries an optional `worldXYZ`. Reverse-Z is +honoured (depth=1 = near, 0 = far / miss). The hover tooltip shows +`x, y, z` (2 decimals) under the entity id. Useful for measurement +hooks and point-cloud picks where the synthetic entity has no +surface property to display. + +**Solid-color picker.** When the point-cloud panel's colour mode is +set to `fixed`, a native `` swatch appears. +Hex round-trips through the existing `[r,g,b,a]` store tuple. + +**Colour-mode legend.** A new `PointCloudLegend` component renders +inline beneath the colour-mode buttons: +- Classification → list of ASPRS LAS 1.4 class id / colour swatch / + label (Ground, Vegetation, Building, ...). Palette mirrors + `point-shader.wgsl.ts` exactly. +- Intensity → black-to-white gradient bar with low/high labels. +- Height → cool-warm gradient bar (blue → cyan → green → yellow → + red), matching the shader's `height_ramp`. +RGB and Solid don't render a legend. + +**Cancel button for in-flight streams.** New +`activeStreamCanceller` field on the loading slice. Both ingest +sites (`useIfcLoader`, `useIfcFederation`) register +`() => streamHandle.cancel()` after starting and clear on success / +error. `StatusBar` shows a Cancel button while the canceller is +non-null. AbortError on cancel is reported as "Cancelled" rather +than a scary error string. diff --git a/.changeset/pointcloud-pr614-review-r2.md b/.changeset/pointcloud-pr614-review-r2.md new file mode 100644 index 000000000..fc096a88e --- /dev/null +++ b/.changeset/pointcloud-pr614-review-r2.md @@ -0,0 +1,36 @@ +--- +"@ifc-lite/pointcloud": patch +"@ifc-lite/viewer": patch +--- + +Round 2 of CodeRabbit feedback on PR #614: + +- **E57 stride downsampling drops classifications.** `applyStride` rebuilt + positions / colors / intensities into new arrays but never copied the + per-point class IDs, so any non-default stride (`{ stride: 2 }` and up) + silently lost them and `hasClassification` flipped to false. 
+- **Federation abort can stomp a newer load.** The AbortError handler in + `useIfcFederation.addModel()` wrote `progress`, `error`, and `loading` + unconditionally — if a second `addModel()` started after the first was + cancelled, it lost its spinner and progress to the cancelled load's + cleanup. Added a `loadSessionRef` token (mirrors `useIfcLoader`) and + gate state writes on `loadSessionRef.current === currentSession`. +- **E57 Integer classification subtracts `minimum`.** Class IDs are + absolute labels (ASPRS LAS 1.4 0..31), not range-normalised offsets. + `raw - minimum` was corrupting class IDs whenever a producer declared + a non-zero `minimum` on the Integer-encoded classification field. The + Integer branch now matches the ScaledInteger branch's intent: keep + the raw byte, clamp to 0..255. +- **PCD probe missed `VERSION` / `FIELDS` headers.** The magic-byte + detector only recognised `# .PCD …` comment-style headers. Real PCDs + emitted by PCL's `pcl_io` and a few third-party tools start directly + with `VERSION 0.7\n…` or `FIELDS x y z\n…` — these now route through + the PCD decoder instead of falling through to extension-based + detection (which would mis-route a renamed PCD). +- **Catch-block logging.** Per repo convention, log point-cloud ingest + failures in `useIfcLoader.ts` before the early return so abort vs. + real-failure vs. stale-session paths are distinguishable in console + triage. + +Test cleanup: drop the shadowed (and unused) ScaledInteger packet +buffer in `e57.test.ts` so only the live `fullBuf` setup remains. diff --git a/.changeset/pointcloud-pts-xyz.md b/.changeset/pointcloud-pts-xyz.md new file mode 100644 index 000000000..a141feaf9 --- /dev/null +++ b/.changeset/pointcloud-pts-xyz.md @@ -0,0 +1,39 @@ +--- +"@ifc-lite/pointcloud": minor +"@ifc-lite/viewer": minor +--- + +PTS / XYZ ASCII point cloud reader. + +Both formats are line-oriented plain-text scans common in legacy +survey workflows. 
They share the same syntax — they differ only in +the optional first-line point count (PTS may have one; XYZ never +does). One shared decoder + streaming source handles both. + +Auto-detected per-line layouts (by column count of the first data +line): +- 3 cols → `X Y Z` +- 4 cols → `X Y Z I` (intensity) +- 6 cols → `X Y Z R G B` +- 7 cols → `X Y Z I R G B` (canonical PTS) +- 9 cols → `X Y Z R G B Nx Ny Nz` (XYZ-with-normals; normals dropped) +- 10 cols → `X Y Z I R G B Nx Ny Nz` (PTS-with-normals; normals dropped) +- For XYZ with unknown column counts ≥3 we still emit positions and + skip the rest, so weird custom exports load instead of erroring. + +Other behaviour: +- Comment lines (`#`, `//`) and blank lines are skipped. +- Intensity normalisation: 0..1 vs 0..255 vs raw sensor detected from + the observed maximum, then mapped to u16. +- RGB normalisation: same heuristic (>1.0 → 0..255 source). +- Whole-file decode wrapped in `AsciiPointsStreamingSource`; the + streaming host's 25M-point cap stride-downsamples on the way out. + +Wired into the decode worker, format detection +(`detectPointCloudFormat` returns `'pts'` / `'xyz'`), the file +picker accept lists, drop handlers, and both `useIfcLoader` / +`useIfcFederation` ingest branches. The "PTS / XYZ ASCII points — +not yet supported" toast is removed from `describeUnsupportedFormat`. + +10 new unit tests cover layout probing, decoder round-trips for the +common shapes, and the comment / header-count edge cases. diff --git a/.changeset/pointcloud-rect-pick.md b/.changeset/pointcloud-rect-pick.md new file mode 100644 index 000000000..bfd2ead23 --- /dev/null +++ b/.changeset/pointcloud-rect-pick.md @@ -0,0 +1,38 @@ +--- +"@ifc-lite/renderer": minor +"@ifc-lite/viewer": minor +--- + +GPU rectangle pick (marquee select) — meshes + point clouds. + +Hold `Ctrl` (or `⌘` on macOS) and drag with the left mouse button +in the select tool to draw a rectangle. 
On release, every entity +(mesh or point cloud) whose pixel falls inside the rect becomes +the new selection. A teal-dashed SVG outline tracks the drag. + +Implementation: +- `Picker.pickRect(x0, y0, x1, y1, …) → Set` renders the + same pick pass as `pick()` and reads back the texel rect, deduping + hits to a Set. Mesh + point splats both participate (point splats + share the depth buffer in the pick pass). +- A new private `Picker.renderPickPass` extracts the shared render- + pass setup so single-pixel `pick` and rect `pickRect` don't drift. +- `PickingManager.pickRect` applies the same visibility filtering + (`hiddenIds`, `isolatedIds`) as `pick`. The CPU-raycast and + dynamic-mesh-creation fallbacks `pick` uses for very large batched + models are skipped — rect pick only sees already-hydrated meshes. +- `Renderer.pickRect` exposes the manager's API. +- New `RectSelectionOverlay` component renders the dashed SVG box + while dragging; lives inside `Viewport.tsx` as a sibling of the + canvas. +- `useMouseControls` tracks a new `mouseState.isRectSelecting` flag, + suppresses orbit/pan during the drag, and on mouseup runs + `renderer.pickRect(...)` and feeds the result into + `setSelectedEntityIds`. A 4-pixel minimum rect size avoids + clobbering selection on a stray Ctrl-click. +- `MouseState.isRectSelecting?: boolean` and a new + `setRectSelection?` callback added to `UseMouseControlsParams`. + +Lasso (polygonal) pick still pending — covered by issue #611's +mid-term list. Per-class isolation for points is a separate +follow-up. diff --git a/.changeset/pointcloud-section-preview.md b/.changeset/pointcloud-section-preview.md new file mode 100644 index 000000000..3e9d92880 --- /dev/null +++ b/.changeset/pointcloud-section-preview.md @@ -0,0 +1,36 @@ +--- +"@ifc-lite/renderer": minor +"@ifc-lite/viewer": minor +--- + +Section-plane drag preview — render at 1/4 density during slider +drag for responsive section-cutting on huge point clouds. 
+ +The splat shader gains a `previewStride` uniform that culls +`(instance_index % stride) != 0` at the start of `vs_main`. The +section-plane position slider wires `onPointerDown` to set +`previewStride: 4` and `onPointerUp` to restore `1`, so scans of +millions of points stay responsive while the user drags. + +Implementation: +- `POINT_UNIFORM_SIZE` bumped from 208 → 224 to add a new + `extras: vec4` slot. `extras.x` carries `previewStride`; + `yzw` reserved for future per-frame state. +- `PointCloudRenderOptions.previewStride?: number` clamped to + [1, 256] in the renderer. +- Vertex shader culls hidden instances by writing + `clipPos = vec4(0, 0, -2, 1)` (outside reverse-Z `[0, 1]`) so they + drop pre-rasterisation. +- New `pointCloudPreviewStride` field on the point cloud slice + (default 1) with `setPointCloudPreviewStride` action. +- `usePointCloudSync` forwards the stride to + `setPointCloudOptions`. +- `SectionOverlay`'s position slider triggers stride 4 on + drag start (pointer + keyboard), 1 on release. Only flips when + `pointCloudAssetCount > 0` so IFC-only sessions are unaffected. + +Triangle meshes ignore the stride — they're cheap enough that +section drag was already smooth. + +Verified: full repo typecheck (24/24), 655 viewer tests, viewer +Vite build green. diff --git a/apps/viewer/src/components/viewer/DeviationPanel.tsx b/apps/viewer/src/components/viewer/DeviationPanel.tsx new file mode 100644 index 000000000..73516c0a5 --- /dev/null +++ b/apps/viewer/src/components/viewer/DeviationPanel.tsx @@ -0,0 +1,172 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * BIM ↔ scan deviation heatmap controls. + * + * Renders a "Compute Deviation" button when the scene has at least + * one mesh and one point cloud. 
Once compute completes, exposes a + * range slider + diverging-ramp legend; the splat shader's + * deviation colour mode then visualises signed distance to the + * nearest mesh surface. + * + * Lives inside the `PointCloudPanel`; rendered conditionally on + * `pointCloudAssetCount > 0`. + */ + +import { useCallback, useState } from 'react'; +import { useViewerStore } from '@/store'; +import { getGlobalRenderer } from '@/hooks/useBCF'; +import { cn } from '@/lib/utils'; + +export interface DeviationPanelProps { + /** Total number of triangles currently in the scene — gates the + * compute button on the existence of a BIM model. */ + triangleCount: number; +} + +export function DeviationPanel({ triangleCount }: DeviationPanelProps) { + const halfRange = useViewerStore((s) => s.pointCloudDeviationHalfRange); + const setHalfRange = useViewerStore((s) => s.setPointCloudDeviationHalfRange); + const computed = useViewerStore((s) => s.pointCloudDeviationComputed); + const setComputed = useViewerStore((s) => s.setPointCloudDeviationComputed); + const colorMode = useViewerStore((s) => s.pointCloudColorMode); + const setColorMode = useViewerStore((s) => s.setPointCloudColorMode); + + const [running, setRunning] = useState(false); + const [stats, setStats] = useState<{ + triangles: number; + points: number; + durationMs: number; + } | null>(null); + const [error, setError] = useState(null); + + const handleCompute = useCallback(async () => { + const renderer = getGlobalRenderer(); + if (!renderer) { + setError('Renderer not initialised yet.'); + return; + } + setError(null); + setRunning(true); + const t0 = performance.now(); + try { + const result = await renderer.computeDeviations({ maxRange: 1.0 }); + const dt = performance.now() - t0; + if (result.pointsProcessed === 0) { + setError('No points processed — load a point cloud first.'); + setRunning(false); + return; + } + if (result.bvhTriangles === 0) { + setError('No mesh geometry in the scene — load an IFC first.'); + 
setRunning(false); + return; + } + setStats({ + triangles: result.bvhTriangles, + points: result.pointsProcessed, + durationMs: dt, + }); + setComputed(true); + // Default-pick a sensible half-range from the BVH's bbox if the + // user hasn't touched the slider yet (initial 5 cm is fine for + // small models but useless for a city-block scan). + if (halfRange === 0.05 && result.suggestedHalfRange !== 0.05) { + setHalfRange(result.suggestedHalfRange); + } + // Auto-switch the colour mode to deviation so the user sees + // the result immediately. + setColorMode('deviation'); + } catch (err) { + setError(err instanceof Error ? err.message : String(err)); + } finally { + setRunning(false); + } + }, [halfRange, setHalfRange, setColorMode, setComputed]); + + // Hide the panel entirely when there's no BIM to compare against. + // Point-cloud-only sessions (just a LAS / IFCx scan) have nothing + // to deviate from so the button would always fail. + if (triangleCount === 0) return null; + + return ( +
+ + Deviation (BIM ↔ scan) + + + {error && ( + {error} + )} + {stats && ( +
+ {stats.points.toLocaleString()} pts vs.{' '} + {stats.triangles.toLocaleString()} tris in{' '} + {Math.round(stats.durationMs)} ms +
+ )} + + {computed && ( + <> + {/* Range slider: half-width in mm. Range from 1 mm to 1 m + (logarithmic feel via the millimetre conversion). */} + + + {/* Legend: blue → white → red gradient with labelled endpoints. */} +
+
+ −{(halfRange * 1000).toFixed(0)}mm (inside) + 0 + +{(halfRange * 1000).toFixed(0)}mm (outside) +
+ + {colorMode !== 'deviation' && ( + + )} + + )} +
+ ); +} diff --git a/apps/viewer/src/components/viewer/HoverTooltip.tsx b/apps/viewer/src/components/viewer/HoverTooltip.tsx index a15112e9b..44ead3dec 100644 --- a/apps/viewer/src/components/viewer/HoverTooltip.tsx +++ b/apps/viewer/src/components/viewer/HoverTooltip.tsx @@ -77,6 +77,11 @@ export function HoverTooltip() {
#{hoverState.entityId}
+ {hoverState.worldXYZ && ( +
+ {hoverState.worldXYZ.x.toFixed(2)}, {hoverState.worldXYZ.y.toFixed(2)}, {hoverState.worldXYZ.z.toFixed(2)} +
+ )}
); } diff --git a/apps/viewer/src/components/viewer/MainToolbar.tsx b/apps/viewer/src/components/viewer/MainToolbar.tsx index 1991af1c8..862f74097 100644 --- a/apps/viewer/src/components/viewer/MainToolbar.tsx +++ b/apps/viewer/src/components/viewer/MainToolbar.tsx @@ -425,7 +425,7 @@ export function MainToolbar({ onShowShortcuts }: MainToolbarProps = {} as MainTo // Filter to supported files (IFC, IFCX, GLB) const supportedFiles = Array.from(files).filter( f => f.name.endsWith('.ifc') || f.name.endsWith('.ifcx') || f.name.endsWith('.glb') - || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') + || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') || f.name.toLowerCase().endsWith('.pts') || f.name.toLowerCase().endsWith('.xyz') ); if (supportedFiles.length === 0) return; @@ -466,7 +466,7 @@ export function MainToolbar({ onShowShortcuts }: MainToolbarProps = {} as MainTo // Filter to supported files (IFC, IFCX, GLB) const supportedFiles = Array.from(files).filter( f => f.name.endsWith('.ifc') || f.name.endsWith('.ifcx') || f.name.endsWith('.glb') - || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') + || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') || f.name.toLowerCase().endsWith('.pts') || f.name.toLowerCase().endsWith('.xyz') ); if (supportedFiles.length === 0) return; @@ -781,7 +781,7 @@ export function MainToolbar({ onShowShortcuts }: MainToolbarProps = {} as 
MainTo id="file-input-open" ref={fileInputRef} type="file" - accept=".ifc,.ifcx,.glb,.las,.laz,.ply,.pcd,.e57" + accept=".ifc,.ifcx,.glb,.las,.laz,.ply,.pcd,.e57,.pts,.xyz" multiple onChange={handleFileSelect} className="hidden" @@ -789,7 +789,7 @@ export function MainToolbar({ onShowShortcuts }: MainToolbarProps = {} as MainTo s.pointCloudClassMask); + const toggle = useViewerStore((s) => s.togglePointCloudClass); + const setMask = useViewerStore((s) => s.setPointCloudClassMask); + const allOn = (mask >>> 0) === ALL_VISIBLE; + return ( +
+ + Classes {!allOn && ( + · {countSet(mask)} of 32 visible + )} + +
+ + {CLASSES.map((c) => { + const visible = ((mask >>> c.id) & 1) !== 0; + return ( + + ); + })} +
+
+ ); +} + +function countSet(mask: number): number { + // Hamming weight via Brian Kernighan's algorithm. JS bitwise ops + // are 32-bit so we naturally cover the full ASPRS range. + let n = mask >>> 0; + let count = 0; + while (n !== 0) { + n &= n - 1; + count++; + } + return count; +} + +function rgbCss([r, g, b]: [number, number, number]): string { + const c = (v: number) => Math.max(0, Math.min(255, Math.round(v * 255))); + return `rgb(${c(r)},${c(g)},${c(b)})`; +} diff --git a/apps/viewer/src/components/viewer/PointCloudLegend.tsx b/apps/viewer/src/components/viewer/PointCloudLegend.tsx new file mode 100644 index 000000000..d561b898f --- /dev/null +++ b/apps/viewer/src/components/viewer/PointCloudLegend.tsx @@ -0,0 +1,119 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Per-mode legend for the point-cloud panel. + * + * Renders only when the active colour mode benefits from a legend + * (classification / intensity / height); RGB and Solid don't need one. + * The palettes here MUST stay in sync with `point-shader.wgsl.ts` — + * any colour change in the shader has to come back to this file. + */ + +import type { PointColorModeUi } from '@/store/slices/pointCloudSlice'; + +interface ClassificationEntry { + id: number; + label: string; + rgb: [number, number, number]; +} + +// ASPRS LAS 1.4 standard classes — ids that don't appear here all +// fall back to the shader's "default" entry (0.65 grey) and are +// shown collectively at the bottom of the legend. 
+const CLASSIFICATION: ClassificationEntry[] = [ + { id: 0, label: 'Never classified', rgb: [0.65, 0.65, 0.65] }, + { id: 1, label: 'Unclassified', rgb: [0.65, 0.65, 0.65] }, + { id: 2, label: 'Ground', rgb: [0.55, 0.40, 0.25] }, + { id: 3, label: 'Low vegetation', rgb: [0.55, 0.85, 0.45] }, + { id: 4, label: 'Medium vegetation', rgb: [0.30, 0.75, 0.30] }, + { id: 5, label: 'High vegetation', rgb: [0.10, 0.45, 0.15] }, + { id: 6, label: 'Building', rgb: [0.95, 0.55, 0.20] }, + { id: 7, label: 'Low point (noise)', rgb: [0.95, 0.20, 0.20] }, + { id: 9, label: 'Water', rgb: [0.20, 0.40, 0.95] }, + { id: 10, label: 'Rail', rgb: [0.55, 0.20, 0.85] }, + { id: 11, label: 'Road surface', rgb: [0.30, 0.30, 0.30] }, + { id: 13, label: 'Wire — guard', rgb: [0.95, 0.85, 0.20] }, + { id: 14, label: 'Wire — conductor', rgb: [0.95, 0.95, 0.50] }, + { id: 15, label: 'Transmission tower', rgb: [0.20, 0.20, 0.55] }, + { id: 16, label: 'Wire-structure', rgb: [0.30, 0.65, 0.65] }, + { id: 17, label: 'Bridge deck', rgb: [0.85, 0.70, 0.50] }, + { id: 18, label: 'High noise', rgb: [0.95, 0.20, 0.20] }, +]; + +const HEIGHT_GRADIENT = + 'linear-gradient(to right, ' + + 'rgb(26,51,217), ' // 0.10, 0.20, 0.85 + + 'rgb(26,217,217), ' // 0.10, 0.85, 0.85 + + 'rgb(51,217,51), ' // 0.20, 0.85, 0.20 + + 'rgb(242,242,51), ' // 0.95, 0.95, 0.20 + + 'rgb(242,51,26))'; // 0.95, 0.20, 0.10 + +export interface PointCloudLegendProps { + colorMode: PointColorModeUi; +} + +export function PointCloudLegend({ colorMode }: PointCloudLegendProps) { + if (colorMode === 'classification') { + return ( +
+ + Classes (ASPRS LAS 1.4) + + {CLASSIFICATION.map((c) => ( +
+
+ ))} +
+ ); + } + + if (colorMode === 'intensity') { + return ( +
+ Intensity +
+
+ low + high +
+
+ ); + } + + if (colorMode === 'height') { + return ( +
+ Height (Y-up) +
+
+ low + high +
+
+ ); + } + + return null; +} + +function rgbCss([r, g, b]: [number, number, number]): string { + const c = (v: number) => Math.max(0, Math.min(255, Math.round(v * 255))); + return `rgb(${c(r)},${c(g)},${c(b)})`; +} diff --git a/apps/viewer/src/components/viewer/PointCloudPanel.tsx b/apps/viewer/src/components/viewer/PointCloudPanel.tsx index 0580fdc4a..b1c841700 100644 --- a/apps/viewer/src/components/viewer/PointCloudPanel.tsx +++ b/apps/viewer/src/components/viewer/PointCloudPanel.tsx @@ -12,6 +12,9 @@ import { useViewerStore } from '@/store'; import type { PointColorModeUi, PointSizeModeUi } from '@/store/slices/pointCloudSlice'; import { cn } from '@/lib/utils'; +import { PointCloudLegend } from './PointCloudLegend'; +import { PointCloudClasses } from './PointCloudClasses'; +import { DeviationPanel } from './DeviationPanel'; const COLOR_MODES: Array<{ value: PointColorModeUi; label: string; hint: string }> = [ { value: 'rgb', label: 'RGB', hint: 'Per-point colour from the source' }, @@ -19,6 +22,7 @@ const COLOR_MODES: Array<{ value: PointColorModeUi; label: string; hint: string { value: 'intensity', label: 'Intensity', hint: 'Greyscale ramp from per-point intensity' }, { value: 'height', label: 'Height', hint: 'Cool-warm ramp by Y-up world height' }, { value: 'fixed', label: 'Solid', hint: 'Single colour override' }, + { value: 'deviation', label: 'Deviation', hint: 'Signed distance to nearest BIM surface (compute below)' }, ]; const SIZE_MODES: Array<{ value: PointSizeModeUi; label: string; hint: string }> = [ @@ -30,9 +34,12 @@ const SIZE_MODES: Array<{ value: PointSizeModeUi; label: string; hint: string }> export interface PointCloudPanelProps { /** Number of currently-loaded point cloud assets — panel hides when 0. */ assetCount: number; + /** Total triangle count across the scene (gates the BIM↔scan deviation + * compute button — useless without a BIM model loaded). 
*/ + triangleCount: number; } -export function PointCloudPanel({ assetCount }: PointCloudPanelProps) { +export function PointCloudPanel({ assetCount, triangleCount }: PointCloudPanelProps) { const colorMode = useViewerStore((s) => s.pointCloudColorMode); const setColorMode = useViewerStore((s) => s.setPointCloudColorMode); const sizeMode = useViewerStore((s) => s.pointCloudSizeMode); @@ -45,6 +52,8 @@ export function PointCloudPanel({ assetCount }: PointCloudPanelProps) { const setEdlEnabled = useViewerStore((s) => s.setPointCloudEdlEnabled); const edlStrength = useViewerStore((s) => s.pointCloudEdlStrength); const setEdlStrength = useViewerStore((s) => s.setPointCloudEdlStrength); + const fixedColor = useViewerStore((s) => s.pointCloudFixedColor); + const setFixedColor = useViewerStore((s) => s.setPointCloudFixedColor); if (assetCount <= 0) return null; @@ -81,8 +90,31 @@ export function PointCloudPanel({ assetCount }: PointCloudPanelProps) { ); })} + + {colorMode === 'fixed' && ( + // Native colour input — keeps the panel dependency-free. + // Hex round-trips through float[0..1]: parse `#rrggbb` to a + // [r,g,b,1] tuple on input, format the active rgb back to hex + // on display. Alpha stays 1 since fixed-mode opacity is + // controlled by the splat shape, not the colour swatch. + + )}
+ {/* Per-ASPRS-class visibility — toggles the splat shader's + class-mask uniform; works in any colour mode but most + discoverable when colorMode === 'classification'. */} + + {/* Size mode */}
Size @@ -169,6 +201,25 @@ export function PointCloudPanel({ assetCount }: PointCloudPanelProps) { )}
+ + {/* BIM↔scan deviation heatmap — only useful when both meshes + and points are loaded. The panel renders nothing when there + are no triangles in the scene. */} +
); } + +function rgbToHex([r, g, b]: [number, number, number, number]): string { + const c = (v: number) => Math.max(0, Math.min(255, Math.round(v * 255))).toString(16).padStart(2, '0'); + return `#${c(r)}${c(g)}${c(b)}`; +} + +function hexToRgba(hex: string, alpha: number): [number, number, number, number] { + // Browsers always emit "#rrggbb" from , so we + // can skip the 3-char shorthand path. Parse byte-by-byte and divide. + const r = parseInt(hex.slice(1, 3), 16) / 255; + const g = parseInt(hex.slice(3, 5), 16) / 255; + const b = parseInt(hex.slice(5, 7), 16) / 255; + return [r, g, b, alpha]; +} diff --git a/apps/viewer/src/components/viewer/RectSelectionOverlay.tsx b/apps/viewer/src/components/viewer/RectSelectionOverlay.tsx new file mode 100644 index 000000000..d313de1ba --- /dev/null +++ b/apps/viewer/src/components/viewer/RectSelectionOverlay.tsx @@ -0,0 +1,48 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Visual overlay for the GPU rectangle-select drag (Ctrl/⌘ + LMB + * over the canvas in select mode). Renders an SVG outline whenever + * `rect` is non-null; the parent supplies / clears the prop in step + * with the mouse handler. 
+ */ + +export interface RectSelectionRect { + x0: number; + y0: number; + x1: number; + y1: number; +} + +export interface RectSelectionOverlayProps { + rect: RectSelectionRect | null; +} + +export function RectSelectionOverlay({ rect }: RectSelectionOverlayProps) { + if (!rect) return null; + const left = Math.min(rect.x0, rect.x1); + const top = Math.min(rect.y0, rect.y1); + const width = Math.abs(rect.x1 - rect.x0); + const height = Math.abs(rect.y1 - rect.y0); + if (width < 1 || height < 1) return null; + return ( + + ); +} diff --git a/apps/viewer/src/components/viewer/StatusBar.tsx b/apps/viewer/src/components/viewer/StatusBar.tsx index 2576bb9f5..f35ababb6 100644 --- a/apps/viewer/src/components/viewer/StatusBar.tsx +++ b/apps/viewer/src/components/viewer/StatusBar.tsx @@ -15,6 +15,7 @@ export function StatusBar() { const progress = useViewerStore((s) => s.progress); const error = useViewerStore((s) => s.error); const selectedStoreys = useViewerStore((s) => s.selectedStoreys); + const activeStreamCanceller = useViewerStore((s) => s.activeStreamCanceller); const webgpu = useWebGPU(); const [fps, setFps] = useState(60); @@ -108,6 +109,19 @@ export function StatusBar() { ) : ( Ready )} + {/* Cancel button — only visible while a long-running stream + (LAS/LAZ/PLY/PCD/E57) is in flight. The loader hooks + register/clear the canceller around `await ingest.done`. 
*/} + {activeStreamCanceller && ( + + )} {/* Center: Model Stats */} diff --git a/apps/viewer/src/components/viewer/Viewport.tsx b/apps/viewer/src/components/viewer/Viewport.tsx index b6b1ef45f..26650f525 100644 --- a/apps/viewer/src/components/viewer/Viewport.tsx +++ b/apps/viewer/src/components/viewer/Viewport.tsx @@ -32,6 +32,7 @@ import { import { setGlobalCanvasRef, setGlobalRendererRef, clearGlobalRefs } from '../../hooks/useBCF.js'; import { useMouseControls, type MouseState } from './useMouseControls.js'; +import { RectSelectionOverlay, type RectSelectionRect } from './RectSelectionOverlay.js'; import { useTouchControls, type TouchState } from './useTouchControls.js'; import { useKeyboardControls } from './useKeyboardControls.js'; import { useAnimationLoop } from './useAnimationLoop.js'; @@ -717,6 +718,10 @@ export function Viewport({ // The animation loop reads this to skip post-processing during rapid camera movement. const isInteractingRef = useRef(false); + // Rectangle-select drag state — populated by useMouseControls during + // a Ctrl/⌘ + LMB drag, consumed by RectSelectionOverlay below. + const [rectSelection, setRectSelection] = useState(null); + // ===== Extracted hooks ===== useMouseControls({ canvasRef, @@ -751,6 +756,7 @@ export function Viewport({ handlePickForSelection: (pickResult) => handlePickForSelectionRef.current(pickResult), setHoverState, clearHover, + setRectSelection, openContextMenu, startMeasurement, updateMeasurement, @@ -918,13 +924,18 @@ export function Viewport({ : undefined; return ( - +
+ + {/* Rectangle-select drag visual. Pointer-events:none so the + canvas keeps receiving pointer events during the drag. */} + +
); } diff --git a/apps/viewer/src/components/viewer/ViewportContainer.tsx b/apps/viewer/src/components/viewer/ViewportContainer.tsx index 073fb29e6..b2986e690 100644 --- a/apps/viewer/src/components/viewer/ViewportContainer.tsx +++ b/apps/viewer/src/components/viewer/ViewportContainer.tsx @@ -311,7 +311,7 @@ export function ViewportContainer() { const allDropped = Array.from(e.dataTransfer.files); const supportedFiles = allDropped.filter( f => f.name.endsWith('.ifc') || f.name.endsWith('.ifcx') || f.name.endsWith('.glb') - || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') + || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') || f.name.toLowerCase().endsWith('.pts') || f.name.toLowerCase().endsWith('.xyz') ); if (supportedFiles.length === 0) { @@ -354,7 +354,7 @@ export function ViewportContainer() { // Filter to supported files (IFC, IFCX, GLB) const supportedFiles = Array.from(files).filter( f => f.name.endsWith('.ifc') || f.name.endsWith('.ifcx') || f.name.endsWith('.glb') - || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') + || f.name.toLowerCase().endsWith('.las') || f.name.toLowerCase().endsWith('.laz') || f.name.toLowerCase().endsWith('.ply') || f.name.toLowerCase().endsWith('.pcd') || f.name.toLowerCase().endsWith('.e57') || f.name.toLowerCase().endsWith('.pts') || f.name.toLowerCase().endsWith('.xyz') ); if (supportedFiles.length === 0) return; @@ -566,7 +566,7 @@ export function ViewportContainer() { s.pointCloudAssetCount); - return ; + // Triangle total comes from the merged geometry result. 
The panel + // gates the BIM↔scan deviation compute button on triangleCount > 0 + // so the user can't trigger an empty-BVH compute pass. + const triangleCount = useViewerStore((s) => s.geometryResult?.totalTriangles ?? 0); + return ; } diff --git a/apps/viewer/src/components/viewer/tools/SectionPanel.tsx b/apps/viewer/src/components/viewer/tools/SectionPanel.tsx index 998fbb1b7..0a5802559 100644 --- a/apps/viewer/src/components/viewer/tools/SectionPanel.tsx +++ b/apps/viewer/src/components/viewer/tools/SectionPanel.tsx @@ -6,7 +6,7 @@ * Section plane controls panel */ -import React, { useCallback, useState } from 'react'; +import React, { useCallback, useEffect, useState } from 'react'; import { X, Slice, ChevronDown, FileImage, FlipHorizontal2 } from 'lucide-react'; import { Button } from '@/components/ui/button'; import { useViewerStore } from '@/store'; @@ -20,6 +20,8 @@ export function SectionOverlay() { const setSectionPlanePosition = useViewerStore((s) => s.setSectionPlanePosition); const toggleSectionPlane = useViewerStore((s) => s.toggleSectionPlane); const flipSectionPlane = useViewerStore((s) => s.flipSectionPlane); + const setPreviewStride = useViewerStore((s) => s.setPointCloudPreviewStride); + const pointCloudAssetCount = useViewerStore((s) => s.pointCloudAssetCount); const setActiveTool = useViewerStore((s) => s.setActiveTool); const setDrawingPanelVisible = useViewerStore((s) => s.setDrawing2DPanelVisible); const drawingPanelVisible = useViewerStore((s) => s.drawing2DPanelVisible); @@ -41,6 +43,22 @@ export function SectionOverlay() { } }, [setSectionPlanePosition]); + // Section-plane drag preview: while the user is actively dragging + // the position slider, render the splat shader at 1/4 density so + // huge scans (>10M points) keep up. Restored on release. 
+ const handleSliderDragStart = useCallback(() => { + if (pointCloudAssetCount > 0) setPreviewStride(4); + }, [setPreviewStride, pointCloudAssetCount]); + const handleSliderDragEnd = useCallback(() => { + setPreviewStride(1); + }, [setPreviewStride]); + // Reset stride if the panel disappears mid-drag (e.g. user closes + // the section tool without releasing the slider). Without this the + // store can stay stuck at 4 and keep scans thinned indefinitely. + useEffect(() => { + return () => setPreviewStride(1); + }, [setPreviewStride]); + const togglePanel = useCallback(() => { setIsPanelCollapsed(prev => !prev); }, []); @@ -138,6 +156,16 @@ export function SectionOverlay() { step="0.1" value={sectionPlane.position} onChange={handlePositionChange} + onPointerDown={handleSliderDragStart} + onPointerUp={handleSliderDragEnd} + // pointercancel + blur cover the cases where the + // browser steals capture (touch scroll, OS gesture) + // or the user tabs away without releasing — the + // store would otherwise stay at stride 4. + onPointerCancel={handleSliderDragEnd} + onBlur={handleSliderDragEnd} + onKeyDown={handleSliderDragStart} + onKeyUp={handleSliderDragEnd} aria-label="Section plane position slider" className="w-full h-2 bg-muted rounded-lg appearance-none cursor-pointer accent-primary" /> diff --git a/apps/viewer/src/components/viewer/useMouseControls.ts b/apps/viewer/src/components/viewer/useMouseControls.ts index f2630c598..88b00c784 100644 --- a/apps/viewer/src/components/viewer/useMouseControls.ts +++ b/apps/viewer/src/components/viewer/useMouseControls.ts @@ -41,6 +41,12 @@ export interface MouseState { startX: number; startY: number; didDrag: boolean; + /** + * True while the user is mid-drag in rectangle-select mode (Ctrl/⌘ + * held over the canvas in select tool). Suppresses orbit/pan in + * the drag handlers and triggers `pickRect` on mouseup. 
+ */ + isRectSelecting?: boolean; } export interface UseMouseControlsParams { @@ -102,7 +108,20 @@ export interface UseMouseControlsParams { // Callbacks handlePickForSelection: (pickResult: PickResult | null) => void; - setHoverState: (state: { entityId: number; screenX: number; screenY: number }) => void; + setHoverState: (state: { + entityId: number; + screenX: number; + screenY: number; + worldXYZ?: { x: number; y: number; z: number }; + }) => void; + /** + * Called during a rectangle-selection drag with the current rect + * (CSS pixels, canvas-relative). Passed `null` on drag end to clear + * any visual overlay. The hook handles the actual `pickRect` call + * + selection update internally; this callback is only for the + * overlay visual. + */ + setRectSelection?: (rect: { x0: number; y0: number; x1: number; y1: number } | null) => void; clearHover: () => void; openContextMenu: (entityId: number | null, screenX: number, screenY: number) => void; startMeasurement: (point: MeasurePoint) => void; @@ -186,6 +205,7 @@ export function useMouseControls(params: UseMouseControlsParams): void { calculateScale, getPickOptions, hasPendingMeasurements, + setRectSelection, HOVER_SNAP_THROTTLE_MS, SLOW_RAYCAST_THRESHOLD_MS, hoverThrottleMs, @@ -258,10 +278,23 @@ export function useMouseControls(params: UseMouseControlsParams): void { mouseState.startX = e.clientX; mouseState.startY = e.clientY; mouseState.didDrag = false; + mouseState.isRectSelecting = false; // Determine action based on active tool and mouse button const tool = activeToolRef.current; + // Rectangle-select gesture: Ctrl/⌘ + LMB drag while in the + // select tool. Suppresses orbit/pan; the rect is finalised + // and pick happens on mouseup. 
+ if (tool === 'select' && e.button === 0 && (e.ctrlKey || e.metaKey)) { + mouseState.isRectSelecting = true; + const rect = canvas.getBoundingClientRect(); + const cx = e.clientX - rect.left; + const cy = e.clientY - rect.top; + setRectSelection?.({ x0: cx, y0: cy, x1: cx, y1: cy }); + return; + } + // Will this mousedown lead to an orbit drag? const isPanGesture = tool === 'pan' || e.button === 1 || e.button === 2 || (tool === 'select' && e.shiftKey); @@ -357,6 +390,18 @@ export function useMouseControls(params: UseMouseControlsParams): void { const y = e.clientY - rect.top; const tool = activeToolRef.current; + // Rectangle-select drag: just update the visual; no orbit / pan + // / pick / hover work happens in this branch. + if (mouseState.isRectSelecting) { + setRectSelection?.({ + x0: mouseState.startX - rect.left, + y0: mouseState.startY - rect.top, + x1: x, + y1: y, + }); + return; + } + // Handle measure tool live preview while dragging // IMPORTANT: Check tool first, not activeMeasurement, to prevent orbit conflict if (tool === 'measure' && mouseState.isDragging && activeMeasurementRef.current) { @@ -421,7 +466,12 @@ export function useMouseControls(params: UseMouseControlsParams): void { // Uses visibility filtering so hidden elements don't show hover tooltips const pickResult = await renderer.pick(x, y, getPickOptions()); if (pickResult) { - setHoverState({ entityId: pickResult.expressId, screenX: e.clientX, screenY: e.clientY }); + setHoverState({ + entityId: pickResult.expressId, + screenX: e.clientX, + screenY: e.clientY, + worldXYZ: pickResult.worldXYZ, + }); } else { clearHover(); } @@ -441,6 +491,39 @@ export function useMouseControls(params: UseMouseControlsParams): void { const tool = activeToolRef.current; + // Rectangle-select finalisation: run pickRect against the + // dragged rect, replace the current selection with the result, + // then clear the visual. 
+ if (mouseState.isRectSelecting) { + const canvasRect = canvas.getBoundingClientRect(); + const x0 = mouseState.startX - canvasRect.left; + const y0 = mouseState.startY - canvasRect.top; + const x1 = e.clientX - canvasRect.left; + const y1 = e.clientY - canvasRect.top; + // Tiny rect (just a click + tiny twitch) → no-op so we don't + // accidentally clear selection on a missed Ctrl-click. + const rectSize = Math.max(Math.abs(x1 - x0), Math.abs(y1 - y0)); + if (rectSize >= 4) { + // pickRect can reject on WebGPU validation / device-loss + // paths — swallow the error so the pointer event doesn't + // surface an unhandled rejection. Selection stays + // untouched on failure (better UX than clearing it). + void renderer + .pickRect(x0, y0, x1, y1, getPickOptions()) + .then((ids) => { + useViewerStore.getState().setSelectedEntityIds(Array.from(ids)); + }) + .catch((error) => { + console.warn('[useMouseControls] Rectangle selection failed:', error); + }); + } + setRectSelection?.(null); + mouseState.isRectSelecting = false; + mouseState.isDragging = false; + mouseState.isPanning = false; + return; + } + // Handle measure tool completion if (tool === 'measure' && activeMeasurementRef.current) { if (handleMeasureUp(ctx, e)) return; diff --git a/apps/viewer/src/components/viewer/usePointCloudSync.ts b/apps/viewer/src/components/viewer/usePointCloudSync.ts index 63f1030c6..9cad9796d 100644 --- a/apps/viewer/src/components/viewer/usePointCloudSync.ts +++ b/apps/viewer/src/components/viewer/usePointCloudSync.ts @@ -37,6 +37,10 @@ export function usePointCloudSync(params: UsePointCloudSyncParams): void { const roundShape = useViewerStore((s) => s.pointCloudRoundShape); const edlEnabled = useViewerStore((s) => s.pointCloudEdlEnabled); const edlStrength = useViewerStore((s) => s.pointCloudEdlStrength); + const classMask = useViewerStore((s) => s.pointCloudClassMask); + const previewStride = useViewerStore((s) => s.pointCloudPreviewStride); + const deviationCenter = 
useViewerStore((s) => s.pointCloudDeviationCenterOffset); + const deviationHalf = useViewerStore((s) => s.pointCloudDeviationHalfRange); const setAssetCount = useViewerStore((s) => s.setPointCloudAssetCount); const fittedRef = useRef(false); @@ -82,9 +86,12 @@ export function usePointCloudSync(params: UsePointCloudSyncParams): void { pointSize, worldRadius, roundShape, + classMask, + previewStride, + deviationRange: { centerOffset: deviationCenter, halfRange: deviationHalf }, }); renderer.requestRender(); - }, [colorMode, fixedColor, sizeMode, pointSize, worldRadius, roundShape, isInitialized, rendererRef]); + }, [colorMode, fixedColor, sizeMode, pointSize, worldRadius, roundShape, classMask, previewStride, deviationCenter, deviationHalf, isInitialized, rendererRef]); // Push EDL toggle + strength to the renderer. useEffect(() => { diff --git a/apps/viewer/src/hooks/ingest/pointCloudIngest.ts b/apps/viewer/src/hooks/ingest/pointCloudIngest.ts index d3f7a6337..3a67f7e42 100644 --- a/apps/viewer/src/hooks/ingest/pointCloudIngest.ts +++ b/apps/viewer/src/hooks/ingest/pointCloudIngest.ts @@ -23,7 +23,7 @@ import type { IfcDataStore } from '@ifc-lite/parser'; import type { SchemaVersion } from '../../store/types.js'; import { createCoordinateInfo } from '../../utils/localParsingUtils.js'; -export type PointCloudFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57'; +export type PointCloudFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57' | 'pts' | 'xyz'; /** * IfcTypeEnum.IfcGeographicElement — the closest IFC4 entity for a scan @@ -182,27 +182,60 @@ export function detectPointCloudFormat( fileName: string, buffer: ArrayBuffer | null, ): PointCloudFormat | null { - const lower = fileName.toLowerCase(); - if (lower.endsWith('.las')) return 'las'; - if (lower.endsWith('.laz')) return 'laz'; - if (lower.endsWith('.ply')) return 'ply'; - if (lower.endsWith('.pcd')) return 'pcd'; - if (lower.endsWith('.e57')) return 'e57'; + // Magic bytes win over extension when both are available — a 
LAS + // file dropped as `*.ply` should still load as LAS, not be forced + // through the wrong decoder. PTS / XYZ are ASCII so they have no + // distinctive magic and stay extension-only at the bottom. if (buffer && buffer.byteLength >= 8) { const view = new DataView(buffer, 0, Math.min(buffer.byteLength, 32)); - if (view.getUint32(0, true) === 0x4653414c) return 'las'; - // ASCII probe — first three bytes "ply" → PLY; "# .P" or ".PCD" → PCD. - const b0 = view.getUint8(0), b1 = view.getUint8(1), b2 = view.getUint8(2); - if (b0 === 0x70 /* p */ && b1 === 0x6c /* l */ && b2 === 0x79 /* y */) return 'ply'; - if (b0 === 0x23 /* # */ && view.byteLength > 4 && view.getUint8(2) === 0x2e /* . */) return 'pcd'; - // E57 magic = "ASTM-E57" (8 bytes) + // E57 magic = "ASTM-E57" (8 bytes) — check before LAS so files + // can't accidentally match on the LAS magic in their first 4 bytes. if ( view.getUint8(0) === 0x41 && view.getUint8(1) === 0x53 && view.getUint8(2) === 0x54 && view.getUint8(3) === 0x4d && view.getUint8(4) === 0x2d && view.getUint8(5) === 0x45 && view.getUint8(6) === 0x35 && view.getUint8(7) === 0x37 ) return 'e57'; + if (view.getUint32(0, true) === 0x4653414c /* "LASF" little-endian */) { + // LAS and LAZ share the LASF magic; differentiate by extension + // when available, otherwise default to LAS (laz-perf will throw + // a clear error on a non-LAZ payload). + const lower = fileName.toLowerCase(); + if (lower.endsWith('.laz')) return 'laz'; + return 'las'; + } + // ASCII probes: "ply" header / PCD header line. + const b0 = view.getUint8(0), b1 = view.getUint8(1), b2 = view.getUint8(2); + if (b0 === 0x70 /* p */ && b1 === 0x6c /* l */ && b2 === 0x79 /* y */) return 'ply'; + // PCDs in the wild use three header shapes: + // 1. `# .PCD v0.7\n…` — original commented header + // 2. `VERSION 0.7\n…` — version-first (PCL pcl_io) + // 3. 
`FIELDS x y z\n…` — fields-first (some converters) + // Match all three so a renamed PCD doesn't fall through to the + // extension-based detector. + if (b0 === 0x23 /* # */ && view.byteLength > 4 && view.getUint8(2) === 0x2e /* . */) return 'pcd'; + if ( + b0 === 0x56 /* V */ && b1 === 0x45 /* E */ && b2 === 0x52 /* R */ + && view.byteLength > 7 && view.getUint8(3) === 0x53 /* S */ + && view.getUint8(4) === 0x49 /* I */ && view.getUint8(5) === 0x4f /* O */ + && view.getUint8(6) === 0x4e /* N */ + ) return 'pcd'; + if ( + b0 === 0x46 /* F */ && b1 === 0x49 /* I */ && b2 === 0x45 /* E */ + && view.byteLength > 6 && view.getUint8(3) === 0x4c /* L */ + && view.getUint8(4) === 0x44 /* D */ && view.getUint8(5) === 0x53 /* S */ + ) return 'pcd'; } + // Fall back to extension when the buffer is missing / too short + // OR for ASCII formats (PTS / XYZ) that don't carry a magic header. + const lower = fileName.toLowerCase(); + if (lower.endsWith('.las')) return 'las'; + if (lower.endsWith('.laz')) return 'laz'; + if (lower.endsWith('.ply')) return 'ply'; + if (lower.endsWith('.pcd')) return 'pcd'; + if (lower.endsWith('.e57')) return 'e57'; + if (lower.endsWith('.pts')) return 'pts'; + if (lower.endsWith('.xyz')) return 'xyz'; return null; } @@ -228,9 +261,6 @@ export function describeUnsupportedFormat(fileName: string): string | null { if (lower.endsWith('.fls') || lower.endsWith('.lsproj')) { return 'Faro Scene project — export to E57 from Scene to load it here.'; } - if (lower.endsWith('.pts') || lower.endsWith('.xyz')) { - return 'PTS / XYZ ASCII points — not yet supported (export to PLY or LAS).'; - } return null; } @@ -243,6 +273,76 @@ export function describeUnsupportedFormat(fileName: string): string | null { */ let nextSyntheticExpressId = 1; +/** + * Counter shared across all in-flight ingests. 
We log up to
+ * `DEBUG_CLASS_LOG_LIMIT` chunks total per page session — enough to
+ * see whether the first scan's classifications are reaching the
+ * renderer without spamming the console for users with many files.
+ *
+ * Reset to zero on a hot module reload (HMR re-evaluates the module),
+ * so the dev workflow is "load file → see ≤ 3 chunk diagnostics".
+ */
+const DEBUG_CLASS_LOG_LIMIT = 3;
+let debugClassChunkLogs = 0;
+
+/**
+ * Log presence + 32-bin histogram of the chunk's classification IDs.
+ * Used to debug "classification colour mode shows everything as
+ * unclassified". Common causes the histogram surfaces immediately:
+ * - chunk.classifications is undefined → format / decoder didn't
+ *   emit it (look at the format's streaming source).
+ * - All values 0 or 1 → file is genuinely unclassified (LAS spec
+ *   classes 0 = "Created, never classified", 1 = "Unclassified");
+ *   not a viewer bug.
+ * - Non-trivial spread but rendering is grey → packing or shader
+ *   read is wrong.
+ */
+function logChunkClassHistogram(
+  fileName: string,
+  format: PointCloudFormat,
+  chunk: DecodedPointChunk,
+): void {
+  const classes = chunk.classifications;
+  if (!classes) {
+    // E57 has no standard classification field per ASTM E2807, so
+    // most scans (Faro Focus, Leica BLK, Trimble) won't carry one.
+    // A non-standard `classification` prototype field IS now read
+    // when present; absence here means the file genuinely doesn't
+    // include per-point class IDs.
+    const hint = format === 'e57'
+      ? ' (E57 spec doesn\'t define classification — file must be from CloudCompare or a custom LIDAR pipeline to have it)'
+      : ' (decoder didn\'t emit any per-point class IDs)';
+    console.log(
+      `[pointcloud-debug] ${format} ${fileName} chunk #${debugClassChunkLogs}: ` +
+      `pointCount=${chunk.pointCount} classifications=undefined${hint}`,
+    );
+    return;
+  }
+  // 32-wide histogram (covers the ASPRS LAS 1.4 standard range). 
+  // Anything past 31 lands in `overflow` so misclassified high
+  // values still surface.
+  const hist = new Uint32Array(32);
+  let overflow = 0;
+  let sample: number[] = [];
+  const n = Math.min(classes.length, chunk.pointCount);
+  for (let i = 0; i < n; i++) {
+    const c = classes[i];
+    if (c < 32) hist[c]++;
+    else overflow++;
+    if (sample.length < 8) sample.push(c);
+  }
+  const nonZero: string[] = [];
+  for (let c = 0; c < 32; c++) {
+    if (hist[c] > 0) nonZero.push(`${c}=${hist[c]}`);
+  }
+  if (overflow > 0) nonZero.push(`>31:${overflow}`);
+  console.log(
+    `[pointcloud-debug] ${format} ${fileName} chunk #${debugClassChunkLogs}: ` +
+    `pointCount=${chunk.pointCount} classes.length=${classes.length} ` +
+    `first8=[${sample.join(',')}] hist={${nonZero.join(', ')}}`,
+  );
+}
+
 /**
  * Stream a point cloud into the renderer. Returns immediately; await
  * `result.done` for completion.
@@ -284,6 +384,17 @@ export function ingestPointCloud(opts: PointCloudIngestOptions): PointCloudInges
       });
     },
     onChunk: (chunk) => {
+      // Per-chunk classification diagnostic. Logs whether the
+      // chunk carries a classifications buffer and a 32-bin class
+      // histogram for the first few chunks across all streams so it's
+      // easy to see whether the source actually carries class IDs
+      // (LAS files often have everything as 0/1 for "unclassified").
+      // Capped at 3 logs per page session to keep the console readable;
+      // further debug-on-demand can be done from devtools.
+      if (debugClassChunkLogs < DEBUG_CLASS_LOG_LIMIT) {
+        debugClassChunkLogs++;
+        logChunkClassHistogram(opts.fileName, opts.format, chunk);
+      }
       // LAS / LAZ / E57 / typical scan-style PLY + PCD all store data
       // Z-up by convention (LIDAR / surveying tradition). 
The renderer // is Y-up internally — the IFCx ingest path applies the same diff --git a/apps/viewer/src/hooks/useIfcFederation.ts b/apps/viewer/src/hooks/useIfcFederation.ts index d8d824f1f..ec28050f3 100644 --- a/apps/viewer/src/hooks/useIfcFederation.ts +++ b/apps/viewer/src/hooks/useIfcFederation.ts @@ -10,7 +10,7 @@ * Extracted from useIfc.ts for better separation of concerns */ -import { useCallback } from 'react'; +import { useCallback, useRef } from 'react'; import { useShallow } from 'zustand/react/shallow'; import { useViewerStore, type FederatedModel, type SchemaVersion } from '../store.js'; import { @@ -372,6 +372,13 @@ export function useIfcFederation() { findModelForGlobalId: s.findModelForGlobalId, }))); + // Per-call ownership token. Each addModel() bumps this; state writes + // (loading/error/progress) in the catch block must compare back to + // their captured value before mutating, so a cancelled load A doesn't + // overwrite progress for a newer load B that started after A's abort. + // Mirrors the same pattern in useIfcLoader.ts. + const loadSessionRef = useRef(0); + /** * Add a model to the federation (multi-model support) * Uses FederationRegistry to assign unique ID offsets - BULLETPROOF against ID collisions @@ -389,6 +396,7 @@ export function useIfcFederation() { ): Promise => { const modelId = options?.modelId ?? crypto.randomUUID(); const addStart = performance.now(); + const currentSession = ++loadSessionRef.current; try { // IMPORTANT: Before adding a new model, check if there's a legacy model // (loaded via loadFile) that's not in the Map yet. If so, migrate it first. @@ -461,7 +469,7 @@ export function useIfcFederation() { // depends on persisting it onto the FederatedModel record. 
let pointCloudHandleId: number | undefined; - if (format === 'las' || format === 'laz' || format === 'ply' || format === 'pcd' || format === 'e57') { + if (format === 'las' || format === 'laz' || format === 'ply' || format === 'pcd' || format === 'e57' || format === 'pts' || format === 'xyz') { const renderer = getGlobalRenderer(); if (!renderer) { setError('Renderer not initialised — try again after the viewer mounts.'); @@ -482,11 +490,25 @@ export function useIfcFederation() { onProgress: setProgress, onAssetCountDelta: incCount, }); + // Expose cancellation while the stream is in-flight. Capture + // the canceller as a named ref so the cleanup can verify the + // store still points at us before clearing — a second + // addModel() that began before this one settles must not lose + // its Cancel button to our finally block. + const { setActiveStreamCanceller } = useViewerStore.getState(); + const cancelStream = () => ingest.streamHandle.cancel(); + setActiveStreamCanceller(cancelStream); // ingest.done rejects on stream errors; ingestPointCloud's onError // callback already calls removePointCloudAsset + incCount(-1), so // the outer catch must NOT repeat that cleanup or the count goes // negative when other point clouds are still loaded. - await ingest.done; + try { + await ingest.done; + } finally { + if (useViewerStore.getState().activeStreamCanceller === cancelStream) { + setActiveStreamCanceller(null); + } + } parsedDataStore = ingest.dataStore; parsedGeometry = ingest.geometryResult; schemaVersion = ingest.schemaVersion; @@ -652,9 +674,28 @@ export function useIfcFederation() { return modelId; } catch (err) { + // Only mutate shared loading/error/progress state if our session + // is still the active one. A second addModel() that started after + // we were cancelled has already taken over the spinner — we must + // not overwrite it with our "Cancelled" state. 
+ const isCurrent = loadSessionRef.current === currentSession; + // User-initiated cancel surfaces as an AbortError. Map it to a + // benign "Cancelled" state so the federated path matches the + // single-model loader rather than reporting a parse failure. + if (err instanceof DOMException && err.name === 'AbortError') { + console.log('[useIfc] addModel cancelled by user'); + if (isCurrent) { + setError(null); + setProgress({ phase: 'Cancelled', percent: 0 }); + setLoading(false); + } + return null; + } console.error('[useIfc] addModel failed:', err); - setError(err instanceof Error ? err.message : 'Unknown error'); - setLoading(false); + if (isCurrent) { + setError(err instanceof Error ? err.message : 'Unknown error'); + setLoading(false); + } return null; } }, [setLoading, setError, setProgress, setIfcDataStore, setGeometryResult, storeAddModel, hasModels, registerModelOffset]); diff --git a/apps/viewer/src/hooks/useIfcLoader.ts b/apps/viewer/src/hooks/useIfcLoader.ts index da2529895..592df129a 100644 --- a/apps/viewer/src/hooks/useIfcLoader.ts +++ b/apps/viewer/src/hooks/useIfcLoader.ts @@ -1569,7 +1569,7 @@ export function useIfcLoader() { // LAS / LAZ point clouds: stream chunks straight to the renderer. // No on-disk cache, no server upload — the data goes worker → GPU. - if (format === 'las' || format === 'laz' || format === 'ply' || format === 'pcd' || format === 'e57') { + if (format === 'las' || format === 'laz' || format === 'ply' || format === 'pcd' || format === 'e57' || format === 'pts' || format === 'xyz') { const renderer = getGlobalRenderer(); if (!renderer) { setError('Renderer not initialised — try again after the viewer mounts.'); @@ -1590,6 +1590,20 @@ export function useIfcLoader() { onProgress: setProgress, onAssetCountDelta: incCount, }); + // Expose cancellation to the UI (StatusBar shows a Cancel + // button while this is non-null). 
Cleared via the + // `clearOwnedCanceller` helper below so a later load that + // installed its own canceller never gets clobbered by our + // cleanup paths — the helper only nulls the store when the + // stored function is still ours. + const { setActiveStreamCanceller } = useViewerStore.getState(); + const cancelStream = () => ingest.streamHandle.cancel(); + setActiveStreamCanceller(cancelStream); + const clearOwnedCanceller = () => { + if (useViewerStore.getState().activeStreamCanceller === cancelStream) { + setActiveStreamCanceller(null); + } + }; // ingestPointCloud's onError callback already runs renderer cleanup // + incCount(-1); the outer catch must NOT repeat them or the // pointCloudAssetCount will go negative. @@ -1601,15 +1615,38 @@ export function useIfcLoader() { // the spinner / model record now. Free the renderer handle // so we don't leak the half-streamed asset. if (loadSessionRef.current !== currentSession) { + console.warn( + `[useIfc] pointcloud ingest rejected on stale session (handle=${ingest.rendererHandle.id}):`, + err, + ); renderer.removePointCloudAsset(ingest.rendererHandle); + clearOwnedCanceller(); return; } const message = err instanceof Error ? err.message : String(err); - updateModel(primaryModelId, { loadState: 'error', loadError: message }); - setError(`${format.toUpperCase()} parsing failed: ${message}`); + // Distinguish a user-initiated abort from a real failure so + // the status bar shows "Cancelled" instead of a scary error. 
+ const isAbort = err instanceof DOMException && err.name === 'AbortError'; + if (isAbort) { + console.log( + `[useIfc] pointcloud ingest cancelled (model=${primaryModelId}, handle=${ingest.rendererHandle.id})`, + ); + updateModel(primaryModelId, { loadState: 'error', loadError: 'cancelled' }); + setError(null); + setProgress({ phase: 'Cancelled', percent: 0 }); + } else { + console.error( + `[useIfc] pointcloud ingest failed (format=${format}, model=${primaryModelId}):`, + err, + ); + updateModel(primaryModelId, { loadState: 'error', loadError: message }); + setError(`${format.toUpperCase()} parsing failed: ${message}`); + } + clearOwnedCanceller(); setLoading(false); return; } + clearOwnedCanceller(); if (loadSessionRef.current !== currentSession) { // A newer load already began. Drop our streamed asset and // skip every store/UI mutation so we don't overwrite the diff --git a/apps/viewer/src/store/slices/loadingSlice.ts b/apps/viewer/src/store/slices/loadingSlice.ts index 7ccd3ea2a..0d56ad182 100644 --- a/apps/viewer/src/store/slices/loadingSlice.ts +++ b/apps/viewer/src/store/slices/loadingSlice.ts @@ -16,6 +16,15 @@ export interface LoadingSlice { geometryProgress: { phase: string; percent: number; indeterminate?: boolean } | null; metadataProgress: { phase: string; percent: number; indeterminate?: boolean } | null; error: string | null; + /** + * Cancellation hook for an in-flight long-running operation (e.g. + * streaming a 100M-point scan). UI components can show a Cancel + * button while this is non-null. The loader hooks register the + * canceller after starting the stream and clear it on success / + * error. Kept on the loading slice (not its own slice) since it + * tracks lifecycle alongside `progress`. 
+ */ + activeStreamCanceller: (() => void) | null; // Actions setLoading: (loading: boolean) => void; @@ -24,6 +33,7 @@ export interface LoadingSlice { setGeometryProgress: (progress: { phase: string; percent: number; indeterminate?: boolean } | null) => void; setMetadataProgress: (progress: { phase: string; percent: number; indeterminate?: boolean } | null) => void; setError: (error: string | null) => void; + setActiveStreamCanceller: (cancel: (() => void) | null) => void; } export const createLoadingSlice: StateCreator = (set) => ({ @@ -34,6 +44,7 @@ export const createLoadingSlice: StateCreator set({ loading }), @@ -42,4 +53,5 @@ export const createLoadingSlice: StateCreator set({ geometryProgress }), setMetadataProgress: (metadataProgress) => set({ metadataProgress }), setError: (error) => set({ error }), + setActiveStreamCanceller: (activeStreamCanceller) => set({ activeStreamCanceller }), }); diff --git a/apps/viewer/src/store/slices/pointCloudSlice.ts b/apps/viewer/src/store/slices/pointCloudSlice.ts index ccb267b78..c1ca20267 100644 --- a/apps/viewer/src/store/slices/pointCloudSlice.ts +++ b/apps/viewer/src/store/slices/pointCloudSlice.ts @@ -12,7 +12,7 @@ import type { StateCreator } from 'zustand'; -export type PointColorModeUi = 'rgb' | 'classification' | 'intensity' | 'height' | 'fixed'; +export type PointColorModeUi = 'rgb' | 'classification' | 'intensity' | 'height' | 'fixed' | 'deviation'; export type PointSizeModeUi = 'fixed-px' | 'adaptive-world' | 'attenuated'; export interface PointCloudSlice { @@ -31,6 +31,35 @@ export interface PointCloudSlice { pointCloudEdlEnabled: boolean; /** EDL strength multiplier. 0..3, default 1. */ pointCloudEdlStrength: number; + /** + * Per-ASPRS-class visibility bitmask (32 bits = covers classes + * 0..31, the LAS 1.4 standard range). Bit `i` set → class `i` + * visible. Default `0xFFFFFFFF` (all visible). Only point clouds + * carry classifications; meshes ignore this. 
+ */ + pointCloudClassMask: number; + /** + * Stride-cull factor for the splat shader. 1 = render every point, + * N>1 = render every Nth point. Used by the section-plane slider's + * drag-preview path so dragging over a 100M-point scan stays + * responsive. Defaults to 1 (full density). + */ + pointCloudPreviewStride: number; + /** + * BIM↔scan deviation heatmap range. `centerOffset` shifts the + * "white" point off zero (handy when a scan has a global offset + * from the model); `halfRange` is the metres mapped to ±1 on the + * blue-white-red ramp. Defaults to (0, 0.05) — ±5cm. + */ + pointCloudDeviationCenterOffset: number; + pointCloudDeviationHalfRange: number; + /** + * True once `Renderer.computeDeviations` has populated the deviation + * buffers for the current point cloud + mesh set. UI gates the + * "Deviation" colour-mode option on this flag so users don't get a + * confusing all-blue rendering when nothing has been computed. + */ + pointCloudDeviationComputed: boolean; /** * Best-effort count of point cloud assets currently uploaded to the * renderer. Updated by ingest paths; UI uses it to show/hide the @@ -45,6 +74,14 @@ export interface PointCloudSlice { setPointCloudRoundShape: (enabled: boolean) => void; setPointCloudEdlEnabled: (enabled: boolean) => void; setPointCloudEdlStrength: (strength: number) => void; + setPointCloudClassMask: (mask: number) => void; + /** Toggle a single ASPRS class. `classId` is clamped to 0..31. */ + togglePointCloudClass: (classId: number) => void; + /** Set the stride-cull factor (1 = full density). 
*/ + setPointCloudPreviewStride: (stride: number) => void; + setPointCloudDeviationCenterOffset: (m: number) => void; + setPointCloudDeviationHalfRange: (m: number) => void; + setPointCloudDeviationComputed: (computed: boolean) => void; setPointCloudAssetCount: (count: number) => void; incrementPointCloudAssetCount: (n?: number) => void; } @@ -67,6 +104,14 @@ export const POINT_CLOUD_DEFAULTS = { pointCloudRoundShape: true, pointCloudEdlEnabled: true, pointCloudEdlStrength: 1, + // 0xFFFFFFFF — all 32 classes visible. Stored as `-1 >>> 0` to + // keep the value as an unsigned 32-bit integer; JS doesn't have + // a u32 literal type so we round-trip through `>>> 0`. + pointCloudClassMask: 0xFFFFFFFF, + pointCloudPreviewStride: 1, + pointCloudDeviationCenterOffset: 0, + pointCloudDeviationHalfRange: 0.05, + pointCloudDeviationComputed: false, pointCloudAssetCount: 0, } as const; @@ -91,6 +136,32 @@ export const createPointCloudSlice: StateCreator set({ pointCloudEdlStrength: Number.isFinite(strength) ? Math.max(0, Math.min(3, strength)) : 1, }), + setPointCloudClassMask: (mask) => set({ + // Coerce through `>>> 0` to keep the stored value as an unsigned + // 32-bit integer; non-finite / negative inputs reset to "all on". + pointCloudClassMask: Number.isFinite(mask) ? (mask >>> 0) : 0xFFFFFFFF, + }), + togglePointCloudClass: (classId) => set((s) => { + const c = Math.max(0, Math.min(31, classId | 0)); + const bit = 1 << c; + // XOR flips the bit; coerce through `>>> 0` so the stored value + // stays in the unsigned 32-bit range. + return { pointCloudClassMask: (s.pointCloudClassMask ^ bit) >>> 0 }; + }), + setPointCloudPreviewStride: (stride) => set({ + pointCloudPreviewStride: Number.isFinite(stride) + ? Math.max(1, Math.min(256, Math.floor(stride) || 1)) + : 1, + }), + setPointCloudDeviationCenterOffset: (m) => set({ + pointCloudDeviationCenterOffset: Number.isFinite(m) ? 
m : 0, + }), + setPointCloudDeviationHalfRange: (m) => set({ + // halfRange must stay strictly positive — a zero or negative value + // would NaN the GPU ramp's division. Clamp to 0.1 mm minimum. + pointCloudDeviationHalfRange: Number.isFinite(m) ? Math.max(1e-4, m) : 0.05, + }), + setPointCloudDeviationComputed: (computed) => set({ pointCloudDeviationComputed: computed }), setPointCloudAssetCount: (count) => set({ pointCloudAssetCount: Number.isFinite(count) ? Math.max(0, count) : 0, }), diff --git a/apps/viewer/src/store/types.ts b/apps/viewer/src/store/types.ts index 86bbf1645..34b54d31d 100644 --- a/apps/viewer/src/store/types.ts +++ b/apps/viewer/src/store/types.ts @@ -120,6 +120,14 @@ export interface HoverState { entityId: number | null; screenX: number; screenY: number; + /** + * World-space hit position from the GPU pick (depth readback + + * inverse view-projection). Unset when the picker couldn't recover + * one (e.g. `pointCount === 0` clear, or the pick fell on the + * background). Useful for point-cloud hover tooltips where the + * synthetic entity has no surface property to display. + */ + worldXYZ?: { x: number; y: number; z: number }; } export interface ContextMenuState { diff --git a/packages/pointcloud/src/formats/ascii-points.test.ts b/packages/pointcloud/src/formats/ascii-points.test.ts new file mode 100644 index 000000000..a2a1b6dfe --- /dev/null +++ b/packages/pointcloud/src/formats/ascii-points.test.ts @@ -0,0 +1,135 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +import { describe, it, expect } from 'vitest'; +import { + decodeAsciiPoints, + probeAsciiPointsLayout, +} from './ascii-points.js'; + +const enc = new TextEncoder(); + +describe('probeAsciiPointsLayout', () => { + it('detects 3-column XYZ', () => { + const buf = enc.encode('1 2 3\n4 5 6\n'); + const layout = probeAsciiPointsLayout(buf, 'xyz'); + expect(layout?.columns).toBe(3); + expect(layout?.hasHeaderCount).toBe(false); + expect(layout?.fields).toEqual(['x', 'y', 'z']); + }); + + it('detects PTS header count + 7-column layout', () => { + const buf = enc.encode('2\n1 2 3 100 200 100 50\n4 5 6 200 50 100 200\n'); + const layout = probeAsciiPointsLayout(buf, 'pts'); + expect(layout?.hasHeaderCount).toBe(true); + expect(layout?.columns).toBe(7); + expect(layout?.fields).toEqual(['x', 'y', 'z', 'i', 'r', 'g', 'b']); + }); + + it('skips comment lines (#) when probing', () => { + const buf = enc.encode('# field export\n# generated 2026\n1.5 2.5 3.5\n'); + const layout = probeAsciiPointsLayout(buf, 'xyz'); + expect(layout?.columns).toBe(3); + }); + + it('returns null for binary content', () => { + const buf = new Uint8Array([0x00, 0xff, 0x12, 0x34, 0x56]); + expect(probeAsciiPointsLayout(buf, 'xyz')).toBeNull(); + }); + + it('falls through to 3-col positions for an unknown column count (xyz)', () => { + const buf = enc.encode('1 2 3 4 5\n'); + const layout = probeAsciiPointsLayout(buf, 'xyz'); + expect(layout?.columns).toBe(5); + expect(layout?.fields).toEqual(['x', 'y', 'z', 'skip', 'skip']); + }); + + it('rejects unknown column counts in PTS (no fallback)', () => { + const buf = enc.encode('1 2 3 4 5\n'); + expect(probeAsciiPointsLayout(buf, 'pts')).toBeNull(); + }); + + it('drops normals from 10-column PTS-with-normals', () => { + const buf = enc.encode('1 2 3 100 200 200 200 0 1 0\n'); + const layout = probeAsciiPointsLayout(buf, 'pts'); + expect(layout?.columns).toBe(10); + expect(layout?.fields).toEqual(['x', 'y', 'z', 'i', 'r', 'g', 'b', 'skip', 'skip', 
'skip']); + }); + + it('drops normals from 9-column XYZ-with-RGB-and-normals', () => { + const buf = enc.encode('1 2 3 200 200 200 0 1 0\n'); + const layout = probeAsciiPointsLayout(buf, 'xyz'); + expect(layout?.columns).toBe(9); + expect(layout?.fields).toEqual(['x', 'y', 'z', 'r', 'g', 'b', 'skip', 'skip', 'skip']); + }); +}); + +describe('decodeAsciiPoints — XYZ', () => { + it('decodes 3-column positions', () => { + const buf = enc.encode('1 2 3\n4 5 6\n'); + const chunk = decodeAsciiPoints(buf, 'xyz'); + expect(chunk.pointCount).toBe(2); + expect(Array.from(chunk.positions)).toEqual([1, 2, 3, 4, 5, 6]); + expect(chunk.colors).toBeUndefined(); + expect(chunk.intensities).toBeUndefined(); + expect(chunk.bbox).toEqual({ min: [1, 2, 3], max: [4, 5, 6] }); + }); + + it('decodes 6-column positions + 0..255 RGB', () => { + const buf = enc.encode('1 2 3 255 0 0\n4 5 6 0 255 0\n'); + const chunk = decodeAsciiPoints(buf, 'xyz'); + expect(chunk.pointCount).toBe(2); + expect(chunk.colors).toBeDefined(); + // Auto-detected as 0..255 since values >1; renormalised to 0..1. 
+ expect(chunk.colors![0]).toBeCloseTo(1, 3); + expect(chunk.colors![1]).toBeCloseTo(0, 3); + expect(chunk.colors![4]).toBeCloseTo(1, 3); + }); + + it('treats 0..1 RGB as already-normalised', () => { + const buf = enc.encode('1 2 3 1.0 0.5 0.0\n'); + const chunk = decodeAsciiPoints(buf, 'xyz'); + expect(chunk.colors![0]).toBeCloseTo(1.0, 3); + expect(chunk.colors![1]).toBeCloseTo(0.5, 3); + expect(chunk.colors![2]).toBeCloseTo(0.0, 3); + }); + + it('skips comment + blank lines, keeps data', () => { + const buf = enc.encode('# comment\n\n1 2 3\n// also comment\n4 5 6\n'); + const chunk = decodeAsciiPoints(buf, 'xyz'); + expect(chunk.pointCount).toBe(2); + expect(Array.from(chunk.positions)).toEqual([1, 2, 3, 4, 5, 6]); + }); + + it('rejects file with no recognisable data', () => { + const buf = enc.encode('# only comments\n# nothing else\n'); + expect(() => decodeAsciiPoints(buf, 'xyz')).toThrow(/does not look like ASCII/); + }); +}); + +describe('decodeAsciiPoints — PTS', () => { + it('respects header count + 7-column layout', () => { + // Standard PTS: count line, then X Y Z I(0..255) R G B(0..255) + const buf = enc.encode('2\n1 2 3 100 200 100 50\n4 5 6 200 50 100 200\n'); + const chunk = decodeAsciiPoints(buf, 'pts'); + expect(chunk.pointCount).toBe(2); + expect(Array.from(chunk.positions)).toEqual([1, 2, 3, 4, 5, 6]); + expect(chunk.intensities).toBeDefined(); + expect(chunk.colors).toBeDefined(); + // Intensity normalised to u16; raw 100 / 200 in 0..255 source. + expect(chunk.intensities![0]).toBeGreaterThan(0); + expect(chunk.intensities![1]).toBeGreaterThan(chunk.intensities![0]); + // RGB renormalised from 0..255 → 0..1. 
+ expect(chunk.colors![0]).toBeCloseTo(200 / 255, 3); + }); + + it('handles 4-column intensity-only PTS', () => { + const buf = enc.encode('1 2 3 0.5\n4 5 6 0.75\n'); + const chunk = decodeAsciiPoints(buf, 'pts'); + expect(chunk.pointCount).toBe(2); + expect(chunk.colors).toBeUndefined(); + expect(chunk.intensities).toBeDefined(); + expect(chunk.intensities![0]).toBeCloseTo(Math.round(0.5 * 65535), 0); + }); +}); diff --git a/packages/pointcloud/src/formats/ascii-points.ts b/packages/pointcloud/src/formats/ascii-points.ts new file mode 100644 index 000000000..43aeb4f74 --- /dev/null +++ b/packages/pointcloud/src/formats/ascii-points.ts @@ -0,0 +1,283 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * PTS / XYZ ASCII point reader. + * + * Both formats are line-oriented plain-text scans. They differ only + * in the optional first-line point count (PTS may have one; XYZ + * never does) and in convention rather than syntax. + * + * Supported per-line layouts (auto-detected from column count of the + * first data line): + * 3 cols → X Y Z + * 4 cols → X Y Z I (intensity, normalised 0..1 or 0..255) + * 6 cols → X Y Z R G B (RGB 0..255) + * 7 cols → X Y Z I R G B (PTS standard layout) + * 9 cols → X Y Z I R G B Nx Ny Nz (normals dropped) + * + * Lines starting with `#`, `//`, or blank are skipped (comment + * support is non-standard but common in field exports). + * + * The reader is intentionally tolerant: any column count outside the + * known set falls through to "X Y Z plus discarded extras" so a file + * with weird custom columns still loads. + */ + +import type { DecodedPointChunk, PointCloudBBox } from '../types.js'; + +export type AsciiPointsFormat = 'pts' | 'xyz'; + +export interface AsciiPointsLayout { + /** Number of whitespace-separated columns per data line. 
*/ + columns: number; + /** True if the first non-comment line is a single-integer point count. */ + hasHeaderCount: boolean; + /** Resolved per-column meaning for the auto-detected layout. */ + fields: AsciiPointsField[]; +} + +export type AsciiPointsField = 'x' | 'y' | 'z' | 'i' | 'r' | 'g' | 'b' | 'skip'; + +const TEXT_DECODER = new TextDecoder(); + +/** + * Probe the first ~16 KB to decide format + column layout. + * + * Looks at the first non-blank/non-comment line: + * - If it's a single integer, treat as a header point count (PTS). + * - Otherwise the column count of that line determines the layout. + * + * Returns null when the buffer doesn't look like ASCII point data + * (e.g. binary content, non-numeric tokens). Caller can then surface + * a clear "not a point cloud" error. + */ +export function probeAsciiPointsLayout( + buffer: Uint8Array, + format: AsciiPointsFormat, +): AsciiPointsLayout | null { + const probeLen = Math.min(16384, buffer.length); + const text = TEXT_DECODER.decode(buffer.subarray(0, probeLen)); + const lines = text.split(/\r?\n/); + let firstDataLine: string | null = null; + let hasHeaderCount = false; + for (let i = 0; i < lines.length; i++) { + const trimmed = lines[i].trim(); + if (trimmed.length === 0) continue; + if (trimmed.startsWith('#') || trimmed.startsWith('//')) continue; + // PTS often begins with a single integer point count. XYZ never + // does — but we still accept it as a header if it parses cleanly. + if (i === 0 || !hasHeaderCount) { + const tokens = trimmed.split(/\s+/); + if (tokens.length === 1 && /^\d+$/.test(tokens[0])) { + hasHeaderCount = true; + continue; + } + } + firstDataLine = trimmed; + break; + } + if (firstDataLine === null) return null; + const tokens = firstDataLine.split(/\s+/); + const columns = tokens.length; + // Sanity: every token must parse as a finite number. 
+ for (const t of tokens) { + if (!Number.isFinite(Number(t))) return null; + } + const fields = layoutFromColumnCount(columns, format); + if (!fields) return null; + return { columns, hasHeaderCount, fields }; +} + +/** + * Resolve a column count to its field roles. Returns null when the + * count isn't one of the known shapes — caller should surface a clear + * "unsupported column layout" error rather than guess. + */ +function layoutFromColumnCount( + columns: number, + format: AsciiPointsFormat, +): AsciiPointsField[] | null { + switch (columns) { + case 3: + return ['x', 'y', 'z']; + case 4: + // Convention: 4-col PTS is X Y Z I; 4-col XYZ is rarer and + // sometimes X Y Z R (single-channel grayscale). We treat both + // as intensity to keep the logic shared — single-channel + // colour shows up greyscale anyway. + return ['x', 'y', 'z', 'i']; + case 6: + return ['x', 'y', 'z', 'r', 'g', 'b']; + case 7: + // Canonical PTS (X Y Z I R G B) and the most common XYZ-with- + // colour-and-intensity layout. + return ['x', 'y', 'z', 'i', 'r', 'g', 'b']; + case 9: + // X Y Z R G B Nx Ny Nz — colour but no intensity, drop normals. + return ['x', 'y', 'z', 'r', 'g', 'b', 'skip', 'skip', 'skip']; + case 10: + // X Y Z I R G B Nx Ny Nz — full PTS-with-normals. Drop normals. + return ['x', 'y', 'z', 'i', 'r', 'g', 'b', 'skip', 'skip', 'skip']; + default: + // Tolerant fallback for unknown layouts — still emit positions + // from the first three columns so the cloud loads. Discard the + // rest. Better than rejecting the whole file. + if (columns >= 3 && format === 'xyz') { + const fields: AsciiPointsField[] = ['x', 'y', 'z']; + for (let i = 3; i < columns; i++) fields.push('skip'); + return fields; + } + return null; + } +} + +/** + * Decode an entire ASCII point file into a single `DecodedPointChunk`. + * + * For multi-gigabyte scans the streaming source (`AsciiPointsStreamingSource`) + * should be preferred — this path materialises everything in memory. 
+ * + * Per-channel handling: + * - Intensity: if any value > 1.0, treat the column as 0..255 and + * scale to u16. Otherwise treat as 0..1 and scale to u16. Mixed + * ranges within one file are uncommon; we make the call once. + * - RGB: if any channel > 1.0, treat the columns as 0..255. Else 0..1. + */ +export function decodeAsciiPoints( + bytes: Uint8Array, + format: AsciiPointsFormat, +): DecodedPointChunk { + const layout = probeAsciiPointsLayout(bytes, format); + if (!layout) { + throw new Error(`${format.toUpperCase()}: file does not look like ASCII point data`); + } + const text = TEXT_DECODER.decode(bytes); + return decodeAsciiPointsFromText(text, layout); +} + +/** Same as `decodeAsciiPoints` but takes pre-decoded text. */ +export function decodeAsciiPointsFromText( + text: string, + layout: AsciiPointsLayout, +): DecodedPointChunk { + const lines = text.split(/\r?\n/); + // Pre-pass: count valid data lines so we can allocate exactly. + // Cheap (no parsing) and saves the typed-array growth dance. + let dataLineCount = 0; + let headerSkipped = !layout.hasHeaderCount; + for (const raw of lines) { + const trimmed = raw.trim(); + if (trimmed.length === 0) continue; + if (trimmed.startsWith('#') || trimmed.startsWith('//')) continue; + if (!headerSkipped) { + headerSkipped = true; + continue; + } + dataLineCount++; + } + + const xIdx = layout.fields.indexOf('x'); + const yIdx = layout.fields.indexOf('y'); + const zIdx = layout.fields.indexOf('z'); + const iIdx = layout.fields.indexOf('i'); + const rIdx = layout.fields.indexOf('r'); + const gIdx = layout.fields.indexOf('g'); + const bIdx = layout.fields.indexOf('b'); + const hasIntensity = iIdx >= 0; + const hasColor = rIdx >= 0 && gIdx >= 0 && bIdx >= 0; + + const positions = new Float32Array(dataLineCount * 3); + const intensitiesRaw = hasIntensity ? new Float32Array(dataLineCount) : null; + const colorsRaw = hasColor ? 
new Float32Array(dataLineCount * 3) : null; + + let written = 0; + let intensityMax = 0; + let colorMax = 0; + let bboxMinX = Infinity, bboxMinY = Infinity, bboxMinZ = Infinity; + let bboxMaxX = -Infinity, bboxMaxY = -Infinity, bboxMaxZ = -Infinity; + + headerSkipped = !layout.hasHeaderCount; + for (const raw of lines) { + const trimmed = raw.trim(); + if (trimmed.length === 0) continue; + if (trimmed.startsWith('#') || trimmed.startsWith('//')) continue; + if (!headerSkipped) { + headerSkipped = true; + continue; + } + const tokens = trimmed.split(/\s+/); + if (tokens.length < layout.columns) continue; + const x = Number(tokens[xIdx]); + const y = Number(tokens[yIdx]); + const z = Number(tokens[zIdx]); + if (!Number.isFinite(x) || !Number.isFinite(y) || !Number.isFinite(z)) continue; + positions[written * 3] = x; + positions[written * 3 + 1] = y; + positions[written * 3 + 2] = z; + if (x < bboxMinX) bboxMinX = x; if (x > bboxMaxX) bboxMaxX = x; + if (y < bboxMinY) bboxMinY = y; if (y > bboxMaxY) bboxMaxY = y; + if (z < bboxMinZ) bboxMinZ = z; if (z > bboxMaxZ) bboxMaxZ = z; + if (intensitiesRaw) { + const v = Number(tokens[iIdx]); + const f = Number.isFinite(v) ? v : 0; + intensitiesRaw[written] = f; + if (f > intensityMax) intensityMax = f; + } + if (colorsRaw) { + const r = Number(tokens[rIdx]); + const g = Number(tokens[gIdx]); + const b = Number(tokens[bIdx]); + const rf = Number.isFinite(r) ? r : 0; + const gf = Number.isFinite(g) ? g : 0; + const bf = Number.isFinite(b) ? b : 0; + colorsRaw[written * 3] = rf; + colorsRaw[written * 3 + 1] = gf; + colorsRaw[written * 3 + 2] = bf; + const m = Math.max(rf, gf, bf); + if (m > colorMax) colorMax = m; + } + written++; + } + + // Trim if some lines got rejected. + const trimmedPositions = written === dataLineCount ? positions : positions.subarray(0, written * 3); + + // Normalise intensity to u16 (0..65535). Detect 0..255 vs 0..1 vs + // raw sensor by the observed maximum. 
+ let intensities: Uint16Array | undefined; + if (intensitiesRaw) { + intensities = new Uint16Array(written); + const scale = intensityMax > 1.0 + ? (intensityMax > 255 ? 65535 / intensityMax : 65535 / 255) + : 65535; + for (let i = 0; i < written; i++) { + const v = intensitiesRaw[i] * scale; + intensities[i] = v < 0 ? 0 : v > 65535 ? 65535 : Math.round(v); + } + } + + // Normalise colour to 0..1 floats. + let colors: Float32Array | undefined; + if (colorsRaw) { + colors = new Float32Array(written * 3); + const scale = colorMax > 1.0 ? 1 / 255 : 1; + for (let i = 0; i < written * 3; i++) { + const v = colorsRaw[i] * scale; + colors[i] = v < 0 ? 0 : v > 1 ? 1 : v; + } + } + + const bbox: PointCloudBBox = written === 0 + ? { min: [0, 0, 0], max: [0, 0, 0] } + : { min: [bboxMinX, bboxMinY, bboxMinZ], max: [bboxMaxX, bboxMaxY, bboxMaxZ] }; + + return { + positions: written === dataLineCount ? positions : new Float32Array(trimmedPositions), + colors, + intensities, + pointCount: written, + bbox, + }; +} diff --git a/packages/pointcloud/src/formats/e57-decode.ts b/packages/pointcloud/src/formats/e57-decode.ts new file mode 100644 index 000000000..9748fea6c --- /dev/null +++ b/packages/pointcloud/src/formats/e57-decode.ts @@ -0,0 +1,498 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * E57 binary-section decoder for a single Data3D scan. + * + * Walks DataPackets at `entry.binaryFileOffset` (in the LOGICAL + * post-CRC view) and decodes per-record bytestreams as Float32 / + * Float64 / Integer / ScaledInteger columns. ScaledInteger is a + * bit-packed integer with a per-field scale + offset (E57 spec + * §6.3.4) — common in Faro / Trimble / Leica exports. 
+ */ + +import type { DecodedPointChunk, PointCloudBBox } from '../types.js'; +import { findField, type Data3DEntry, type PrototypeField } from './e57-xml.js'; + +/** + * Decode the binary section starting at `entry.binaryFileOffset` in the + * logical-bytes view. NOTE: `binaryFileOffset` here must already point + * at the first DataPacket (i.e. AFTER the 32-byte CompressedVector + * section header) — `decodeE57` does this conversion via + * `resolveCompressedVectorDataOffset`. Callers passing the raw XML + * offset directly will see a "bytestreamCount ≠ prototype length" + * mismatch. + * + * Supports Float (single/double), ScaledInteger (bit-packed integer + * with scale/offset), and Integer for cartesianX/Y/Z + colorRed/ + * Green/Blue + intensity. Other prototype fields are honoured for + * stride math but discarded. + */ +export function decodeE57Scan(logical: Uint8Array, entry: Data3DEntry): DecodedPointChunk { + const xField = findField(entry.prototype, 'cartesianX'); + const yField = findField(entry.prototype, 'cartesianY'); + const zField = findField(entry.prototype, 'cartesianZ'); + if (!xField || !yField || !zField) { + throw new Error('E57: prototype missing cartesianX/Y/Z'); + } + for (const f of [xField, yField, zField]) { + if (f.kind === 'Integer') { + // Plain integer cartesian coords don't appear in any real exporter + // we've seen — the spec uses ScaledInteger when the cartesian is + // integer-quantised. Fail clearly rather than silently producing + // unscaled metres (or whatever the integer happens to be). 
+ throw new Error( + `E57: cartesian${f.name.slice(-1)} encoded as plain Integer (only Float / ScaledInteger supported)`, + ); + } + } + const rField = findField(entry.prototype, 'colorRed'); + const gField = findField(entry.prototype, 'colorGreen'); + const bField = findField(entry.prototype, 'colorBlue'); + const hasRgb = !!(rField && gField && bField); + const iField = findField(entry.prototype, 'intensity'); + // Classification: not in the standard ASTM E2807 prototype, but + // some exporters (CloudCompare, custom LIDAR pipelines) add a + // `classification` Integer / ScaledInteger field. We honour it + // when present so the splat shader's classification colour mode + // works on those files. Stock Faro / Leica scans don't carry it + // — that's why classification mode shows everything as class 0 + // (the spec maps that to "Created, never classified"). + const cField = findField(entry.prototype, 'classification'); + + const positions = new Float32Array(entry.recordCount * 3); + const colors = hasRgb ? new Float32Array(entry.recordCount * 3) : undefined; + // Allocate intensity for any supported field kind. ScaledInteger + // and Integer (u8 / u16) are both common in real exports. + const intensities = iField && (iField.kind === 'Float' || iField.kind === 'Integer' || iField.kind === 'ScaledInteger') + ? new Uint16Array(entry.recordCount) + : undefined; + const classifications = cField && (cField.kind === 'Integer' || cField.kind === 'ScaledInteger') + ? new Uint8Array(entry.recordCount) + : undefined; + + // Walk DataPackets starting at binaryFileOffset. 
+ // Packet header (4 bytes): + // byte 0: packetType (1=data, 2=index, 3=empty) + // byte 1: packetFlags (bit 0 = compressorRestart) + // bytes 2..3: packetLogicalLength - 1 (LE u16; total packet bytes minus 1) + let offset = entry.binaryFileOffset; + const view = new DataView(logical.buffer, logical.byteOffset, logical.byteLength); + let written = 0; + + while (written < entry.recordCount && offset < logical.length) { + if (offset + 4 > logical.length) { + throw new Error('E57: truncated DataPacket header'); + } + const packetType = view.getUint8(offset); + const packetLogicalLength = view.getUint16(offset + 2, true) + 1; + if (packetType !== 1) { + // Skip non-data packets (index/empty); they may appear interleaved. + offset += packetLogicalLength; + continue; + } + const packetEnd = offset + packetLogicalLength; + if (packetEnd > logical.length) { + throw new Error('E57: DataPacket runs past end of logical bytes'); + } + // Data packet header beyond the common 4 bytes: + // byte 4..5: bytestreamCount (u16 LE) + // then `bytestreamCount` × u16 LE = bytestreamByteCount[] + // then payload (concatenated bytestreams, in prototype order) + // + // CRCs in E57 live at the PAGE level (4 bytes per 1024-byte + // physical page, stripped by `stripPageCrc` before we get here). + // There is no per-packet trailing CRC — the bytestreams fill the + // packet exactly up to `packetEnd`. An earlier version of this + // code subtracted 4 bytes assuming a packet-level CRC, which + // false-positived the bounds checks below on real-world Faro / + // Trimble exports whose last bytestream ends within the final + // 4 bytes of the packet. 
+ const payloadEnd = packetEnd; + if (offset + 6 > payloadEnd) { + throw new Error('E57: truncated DataPacket header'); + } + const bytestreamCount = view.getUint16(offset + 4, true); + if (bytestreamCount !== entry.prototype.length) { + throw new Error( + `E57: packet bytestreamCount (${bytestreamCount}) ≠ prototype length (${entry.prototype.length})`, + ); + } + const bytestreamLengths: number[] = []; + let cursor = offset + 6; + for (let i = 0; i < bytestreamCount; i++) { + if (cursor + 2 > payloadEnd) { + throw new Error('E57: truncated bytestream length table'); + } + bytestreamLengths.push(view.getUint16(cursor, true)); + cursor += 2; + } + const fieldOffsets = new Map(); + let streamCursor = cursor; + for (let i = 0; i < bytestreamCount; i++) { + // Each bytestream must fit inside the packet payload — a corrupt + // file could otherwise have us read into the next packet or + // even past `logical`. + if (streamCursor + bytestreamLengths[i] > payloadEnd) { + throw new Error( + `E57: bytestream ${entry.prototype[i].name} (${bytestreamLengths[i]} bytes) ` + + `runs past packet payload at offset ${streamCursor}`, + ); + } + fieldOffsets.set(entry.prototype[i].name, { start: streamCursor, length: bytestreamLengths[i] }); + streamCursor += bytestreamLengths[i]; + } + + // Per-axis packet capacity now varies by field kind: Float uses + // floor(length / byteSize), ScaledInteger uses floor(length * 8 / + // bitsPerRecord). Take the min so a mixed-encoding packet picks + // the shortest stream. 
+ const xPos = fieldOffsets.get('cartesianX')!; + const yPos = fieldOffsets.get('cartesianY')!; + const zPos = fieldOffsets.get('cartesianZ')!; + const xCapacity = floatOrSiPointCapacity(xField, xPos.length); + const yCapacity = floatOrSiPointCapacity(yField, yPos.length); + const zCapacity = floatOrSiPointCapacity(zField, zPos.length); + const pointsInPacket = Math.min(xCapacity, yCapacity, zCapacity); + const take = Math.min(pointsInPacket, entry.recordCount - written); + + readCartesianStream(logical, view, xField, xPos.start, positions, written, take, 0); + readCartesianStream(logical, view, yField, yPos.start, positions, written, take, 1); + readCartesianStream(logical, view, zField, zPos.start, positions, written, take, 2); + + if (colors && rField && gField && bField) { + writeColorChannel(view, fieldOffsets.get('colorRed')!.start, rField, colors, written, take, 0, logical); + writeColorChannel(view, fieldOffsets.get('colorGreen')!.start, gField, colors, written, take, 1, logical); + writeColorChannel(view, fieldOffsets.get('colorBlue')!.start, bField, colors, written, take, 2, logical); + } + if (intensities && iField) { + readIntensityStream(logical, view, iField, fieldOffsets.get('intensity')!.start, intensities, written, take); + } + if (classifications && cField) { + readClassificationStream( + logical, view, cField, + fieldOffsets.get('classification')!.start, + classifications, written, take, + ); + } + + written += take; + offset = packetEnd; + } + + if (written < entry.recordCount) { + // Real-world files sometimes report counts a few records higher + // than what's actually stored; trim positions to the actual count + // so downstream code doesn't see uninitialised tail values. 
+ return finalize( + positions.subarray(0, written * 3), + colors?.subarray(0, written * 3), + intensities?.subarray(0, written), + classifications?.subarray(0, written), + written, + ); + } + return finalize(positions, colors, intensities, classifications, entry.recordCount); +} + +function writeColorChannel( + view: DataView, + start: number, + field: PrototypeField, + colors: Float32Array, + written: number, + take: number, + channelOffset: 0 | 1 | 2, + bytes: Uint8Array, +): void { + if (field.kind === 'Float') { + const stride = field.precision === 'single' ? 4 : 8; + for (let i = 0; i < take; i++) { + const v = stride === 4 ? view.getFloat32(start + i * stride, true) : view.getFloat64(start + i * stride, true); + colors[(written + i) * 3 + channelOffset] = clamp01(v); + } + } else if (field.kind === 'Integer') { + // Pick element width from the declared range. E57 producers use + // either u8 (0..255 — most common) or u16 (0..65535). Both + // appear in real files; assuming u8 distorts u16-encoded colors. + const min = field.minimum ?? 0; + const max = field.maximum ?? 255; + const span = max - min; + const inv = span > 0 ? 1 / span : 1; + const widest = Math.max(Math.abs(min), Math.abs(max)); + const stride = widest > 255 ? 2 : 1; + const signed = min < 0; + for (let i = 0; i < take; i++) { + const off = start + i * stride; + const raw = stride === 2 + ? (signed ? view.getInt16(off, true) : view.getUint16(off, true)) + : (signed ? view.getInt8(off) : view.getUint8(off)); + colors[(written + i) * 3 + channelOffset] = clamp01((raw - min) * inv); + } + } else { + // ScaledInteger colour: bit-packed integer normalised by the + // declared min/max range. The "scale + offset" prototype attrs + // still apply per spec but for colour they always normalise to + // the declared range, so we just remap [minimum, maximum] → [0, 1] + // like Integer colour does. + const min = field.minimum ?? 0; + const max = field.maximum ?? 
1; + const span = max - min; + const inv = span > 0 ? 1 / span : 1; + const bitsPerRecord = scaledIntegerBitsPerRecord(field); + const startBit = start * 8; + for (let i = 0; i < take; i++) { + const raw = readBitsLE(bytes, startBit + i * bitsPerRecord, bitsPerRecord); + colors[(written + i) * 3 + channelOffset] = clamp01(raw * inv); + } + } +} + +/** + * Read N points from a cartesian (X / Y / Z) bytestream into the + * positions array. Float: straight DataView reads; ScaledInteger: + * bit-pack walk plus per-record `(raw + minimum) * scale + offset`. + * + * `axis` selects which of the three position slots to write to + * (0 = X, 1 = Y, 2 = Z). + */ +function readCartesianStream( + bytes: Uint8Array, + view: DataView, + field: PrototypeField, + start: number, + positions: Float32Array, + written: number, + take: number, + axis: 0 | 1 | 2, +): void { + if (field.kind === 'Float') { + const stride = field.precision === 'single' ? 4 : 8; + if (stride === 4) { + for (let i = 0; i < take; i++) { + positions[(written + i) * 3 + axis] = view.getFloat32(start + i * stride, true); + } + } else { + for (let i = 0; i < take; i++) { + positions[(written + i) * 3 + axis] = view.getFloat64(start + i * stride, true); + } + } + return; + } + // ScaledInteger: stream stores `raw_int = (value - minimum)` as + // an unsigned bit-pack; decoded float = (raw_int + minimum) * scale + offset. + const bitsPerRecord = scaledIntegerBitsPerRecord(field); + const minimum = field.minimum ?? 0; + const scale = field.scale ?? 1; + const offset = field.offset ?? 0; + const startBit = start * 8; + for (let i = 0; i < take; i++) { + const raw = readBitsLE(bytes, startBit + i * bitsPerRecord, bitsPerRecord); + positions[(written + i) * 3 + axis] = (raw + minimum) * scale + offset; + } +} + +/** + * Read N intensity samples from a bytestream and normalise to u16. + * Handles Float, Integer, and ScaledInteger kinds. 
+ */ +function readIntensityStream( + bytes: Uint8Array, + view: DataView, + field: PrototypeField, + start: number, + intensities: Uint16Array, + written: number, + take: number, +): void { + if (field.kind === 'Float') { + const stride = field.precision === 'single' ? 4 : 8; + for (let i = 0; i < take; i++) { + const v = stride === 4 ? view.getFloat32(start + i * stride, true) : view.getFloat64(start + i * stride, true); + intensities[written + i] = Math.min(65535, Math.max(0, Math.round(v * 65535))); + } + return; + } + if (field.kind === 'Integer') { + const min = field.minimum ?? 0; + const max = field.maximum ?? 65535; + const span = max - min; + const inv = span > 0 ? 1 / span : 1; + const widest = Math.max(Math.abs(min), Math.abs(max)); + const stride = widest > 255 ? 2 : 1; + const signed = min < 0; + for (let i = 0; i < take; i++) { + const off = start + i * stride; + const raw = stride === 2 + ? (signed ? view.getInt16(off, true) : view.getUint16(off, true)) + : (signed ? view.getInt8(off) : view.getUint8(off)); + const norm = (raw - min) * inv; + intensities[written + i] = Math.min(65535, Math.max(0, Math.round(norm * 65535))); + } + return; + } + // ScaledInteger intensity: range-remap from the bit-pack walk. + const bitsPerRecord = scaledIntegerBitsPerRecord(field); + const minimum = field.minimum ?? 0; + const maximum = field.maximum ?? minimum; + const span = maximum - minimum; + const inv = span > 0 ? 1 / span : 1; + const startBit = start * 8; + for (let i = 0; i < take; i++) { + const raw = readBitsLE(bytes, startBit + i * bitsPerRecord, bitsPerRecord); + intensities[written + i] = Math.min(65535, Math.max(0, Math.round(raw * inv * 65535))); + } +} + +/** + * E57 §6.3.4: bitsPerRecord = ceil(log2(maximum - minimum + 1)). + * Caps at 53 bits (Number-precision limit). Real exporters top out + * around 32 bits. + */ +function scaledIntegerBitsPerRecord(field: PrototypeField): number { + const min = field.minimum ?? 0; + const max = field.maximum ?? 
min; + const span = Math.max(0, max - min); + if (span === 0) return 1; + const bits = Math.ceil(Math.log2(span + 1)); + if (bits > 53) { + throw new Error( + `E57: ScaledInteger field "${field.name}" needs ${bits} bits — exceeds the 53-bit Number-precision limit`, + ); + } + return Math.max(1, bits); +} + +/** Float / Integer / ScaledInteger → max points that fit in `lengthBytes`. */ +function floatOrSiPointCapacity(field: PrototypeField, lengthBytes: number): number { + if (field.kind === 'Float') { + const byteSize = field.precision === 'single' ? 4 : 8; + return Math.floor(lengthBytes / byteSize); + } + if (field.kind === 'ScaledInteger') { + const bits = scaledIntegerBitsPerRecord(field); + return Math.floor((lengthBytes * 8) / bits); + } + // Integer: same width selection as writeColorChannel. + const min = field.minimum ?? 0; + const max = field.maximum ?? 255; + const widest = Math.max(Math.abs(min), Math.abs(max)); + const byteSize = widest > 255 ? 2 : 1; + return Math.floor(lengthBytes / byteSize); +} + +/** + * Read `bitsPerRecord` bits starting at `bitOffset` from `bytes`, + * LSB-first within each byte (E57 spec convention). Uses + * `Math.pow(2, n)` instead of `<< n` to keep precision up to 53 bits. 
+ */ +function readBitsLE(bytes: Uint8Array, bitOffset: number, bitsPerRecord: number): number { + let value = 0; + let bitsRead = 0; + let cur = bitOffset >>> 3; + let inByte = bitOffset & 7; + while (bitsRead < bitsPerRecord) { + const avail = 8 - inByte; + const take = Math.min(avail, bitsPerRecord - bitsRead); + const mask = (1 << take) - 1; + const piece = (bytes[cur] >>> inByte) & mask; + value += piece * Math.pow(2, bitsRead); + bitsRead += take; + inByte = 0; + cur++; + } + return value; +} + +function finalize( + positions: Float32Array, + colors: Float32Array | undefined, + intensities: Uint16Array | undefined, + classifications: Uint8Array | undefined, + pointCount: number, +): DecodedPointChunk { + return { + positions: new Float32Array(positions), + colors: colors ? new Float32Array(colors) : undefined, + intensities: intensities ? new Uint16Array(intensities) : undefined, + classifications: classifications ? new Uint8Array(classifications) : undefined, + pointCount, + bbox: computeBBox(positions), + }; +} + +/** + * Read N classification samples from a bytestream into a Uint8Array. + * Handles the two encodings real exporters use: plain Integer (u8 / + * u16 picked from declared range) and ScaledInteger (bit-packed). + * Values are clamped into the u8 range — ASPRS LAS 1.4 only defines + * up to class 18, so >255 is meaningless but we don't error on it. + */ +function readClassificationStream( + bytes: Uint8Array, + view: DataView, + field: PrototypeField, + start: number, + classifications: Uint8Array, + written: number, + take: number, +): void { + if (field.kind === 'Integer') { + const min = field.minimum ?? 0; + const max = field.maximum ?? 255; + const widest = Math.max(Math.abs(min), Math.abs(max)); + const stride = widest > 255 ? 2 : 1; + const signed = min < 0; + for (let i = 0; i < take; i++) { + const off = start + i * stride; + const raw = stride === 2 + ? (signed ? view.getInt16(off, true) : view.getUint16(off, true)) + : (signed ? 
view.getInt8(off) : view.getUint8(off));
+      // Class IDs are absolute labels (ASPRS LAS 1.4 0..31), not
+      // range-normalised offsets. The raw byte IS the class — don't
+      // subtract `minimum`. Mirrors the ScaledInteger branch below
+      // which uses `raw + minimum` to recover the original value.
+      classifications[written + i] = Math.max(0, Math.min(255, raw));
+    }
+    return;
+  }
+  // ScaledInteger: classification is conceptually an integer but
+  // some exporters declare a scale anyway. Read the raw bits and
+  // ignore scale/offset (no real meaning for class IDs).
+  const bitsPerRecord = scaledIntegerBitsPerRecord(field);
+  const minimum = field.minimum ?? 0;
+  const startBit = start * 8;
+  for (let i = 0; i < take; i++) {
+    const raw = readBitsLE(bytes, startBit + i * bitsPerRecord, bitsPerRecord);
+    classifications[written + i] = Math.max(0, Math.min(255, raw + minimum));
+  }
+}
+
+function clamp01(v: number): number {
+  return v < 0 ? 0 : v > 1 ? 1 : v;
+}
+
+export function computeBBox(positions: Float32Array): PointCloudBBox {
+  // Empty / non-aligned input yields ±Infinity bounds, which poisons
+  // camera fit-to-view and section-plane math downstream. Return a
+  // finite zero-bbox instead.
+  if (positions.length < 3) {
+    return { min: [0, 0, 0], max: [0, 0, 0] };
+  }
+  let minX = Infinity, minY = Infinity, minZ = Infinity;
+  let maxX = -Infinity, maxY = -Infinity, maxZ = -Infinity;
+  let any = false;
+  for (let i = 0; i + 2 < positions.length; i += 3) {
+    const x = positions[i], y = positions[i + 1], z = positions[i + 2];
+    // Skip non-finite coords rather than letting them poison the bbox.
+    // A single NaN/Infinity from a corrupt scan would otherwise propagate.
+ if (!Number.isFinite(x) || !Number.isFinite(y) || !Number.isFinite(z)) continue; + any = true; + if (x < minX) minX = x; if (x > maxX) maxX = x; + if (y < minY) minY = y; if (y > maxY) maxY = y; + if (z < minZ) minZ = z; if (z > maxZ) maxZ = z; + } + if (!any) return { min: [0, 0, 0], max: [0, 0, 0] }; + return { min: [minX, minY, minZ], max: [maxX, maxY, maxZ] }; +} diff --git a/packages/pointcloud/src/formats/e57-page.ts b/packages/pointcloud/src/formats/e57-page.ts new file mode 100644 index 000000000..e533b1238 --- /dev/null +++ b/packages/pointcloud/src/formats/e57-page.ts @@ -0,0 +1,128 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * E57 file header + page-CRC handling. + * + * The file is divided into `pageSize`-byte physical pages, each + * carrying a 4-byte CRC32-C tail. XML offsets and binary-section + * offsets in the XML reference the LOGICAL byte stream — i.e. with + * those CRC tails stripped. Everything in this module helps callers + * convert between the two views. + */ + +const E57_MAGIC = 'ASTM-E57'; + +export interface E57FileHeader { + majorVersion: number; + minorVersion: number; + fileLogicalSize: number; + xmlLogicalOffset: number; + xmlLogicalLength: number; + pageSize: number; +} + +/** Read the 48-byte FileHeader. Throws on bad magic. 
 */
+export function parseE57FileHeader(bytes: Uint8Array): E57FileHeader {
+  if (bytes.length < 48) throw new Error('E57: header truncated (need 48 bytes)');
+  const magic = String.fromCharCode(...bytes.subarray(0, 8));
+  if (magic !== E57_MAGIC) {
+    throw new Error(`E57: bad magic "${magic}" (expected "${E57_MAGIC}")`);
+  }
+  const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
+  return {
+    majorVersion: view.getUint32(8, true),
+    minorVersion: view.getUint32(12, true),
+    fileLogicalSize: readU64LE(view, 16),
+    // Physical XML offset → we convert to logical below; xmlLogicalLength
+    // is the byte length AFTER stripping page CRCs.
+    xmlLogicalOffset: physicalToLogical(readU64LE(view, 24), readU64LE(view, 40)),
+    xmlLogicalLength: readU64LE(view, 32),
+    pageSize: readU64LE(view, 40),
+  };
+}
+
+/**
+ * Strip the 4-byte CRC tail from each `pageSize`-byte physical page.
+ *
+ * Returns a freshly-allocated buffer of "logical" bytes — the form that
+ * XML offsets and CompressedVector data offsets reference.
+ *
+ * `pageSize` is read from the header and is conventionally 1024.
+ */
+export function stripPageCrc(bytes: Uint8Array, pageSize: number): Uint8Array {
+  if (pageSize <= 4) throw new Error('E57: pageSize too small');
+  const payloadPerPage = pageSize - 4;
+  const fullPages = Math.floor(bytes.length / pageSize);
+  const tail = bytes.length - fullPages * pageSize;
+  // A trailing partial page (if any) still ends with a 4-byte CRC, so
+  // its payload (`tail - 4` bytes) is kept below. When 4 or fewer bytes
+  // remain past the last full page they are all CRC — nothing to keep. 
+ const out = new Uint8Array(fullPages * payloadPerPage + Math.max(0, tail - 4)); + let dst = 0; + for (let p = 0; p < fullPages; p++) { + const src = p * pageSize; + out.set(bytes.subarray(src, src + payloadPerPage), dst); + dst += payloadPerPage; + } + if (tail > 4) { + const src = fullPages * pageSize; + out.set(bytes.subarray(src, src + tail - 4), dst); + } + return out; +} + +/** Convert a physical (CRC-paged) offset to the equivalent logical offset. */ +export function physicalToLogical(physical: number, pageSize: number): number { + const payloadPerPage = pageSize - 4; + const pages = Math.floor(physical / pageSize); + const within = physical - pages * pageSize; + return pages * payloadPerPage + within; +} + +/** + * Read a CompressedVector binary-section header (E57 spec §6.4.2) and + * return the LOGICAL byte offset where its DataPackets actually start. + * + * Layout (32 bytes): + * [ 0] u8 sectionId (must == 1 for CompressedVector) + * [ 1] u8[7] reserved + * [ 8] u64 LE sectionLogicalLength + * [16] u64 LE dataPhysicalOffset + * [24] u64 LE indexPhysicalOffset + * + * The XML's `points@fileOffset` points at this section header — NOT at + * the first DataPacket. Reading packets straight at `fileOffset` puts + * the parser ~32 bytes off and the first u16 it reads is the low half + * of `sectionLogicalLength`, which usually decodes as a bytestreamCount + * of 0 (matched the user-reported `bytestreamCount (0) ≠ prototype + * length (7)` error exactly). 
+ */ +export function resolveCompressedVectorDataOffset( + logical: Uint8Array, + physicalSectionOffset: number, + pageSize: number, +): number { + const sectionLogical = physicalToLogical(physicalSectionOffset, pageSize); + if (sectionLogical + 32 > logical.length) { + throw new Error( + `E57: CompressedVector section header at logical ${sectionLogical} runs past end of file (length ${logical.length})`, + ); + } + const view = new DataView(logical.buffer, logical.byteOffset + sectionLogical, 32); + const sectionId = view.getUint8(0); + if (sectionId !== 1) { + throw new Error( + `E57: expected CompressedVector section (id=1) at physical ${physicalSectionOffset}, got id=${sectionId}`, + ); + } + const dataPhysicalOffset = readU64LE(view, 16); + return physicalToLogical(dataPhysicalOffset, pageSize); +} + +export function readU64LE(view: DataView, offset: number): number { + const lo = view.getUint32(offset, true); + const hi = view.getUint32(offset + 4, true); + return hi * 0x100000000 + lo; +} diff --git a/packages/pointcloud/src/formats/e57-xml.ts b/packages/pointcloud/src/formats/e57-xml.ts new file mode 100644 index 000000000..d713eace8 --- /dev/null +++ b/packages/pointcloud/src/formats/e57-xml.ts @@ -0,0 +1,162 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * E57 XML model + parser. + * + * The XML carries the `data3D` structure: per-scan record counts, the + * binary CompressedVector fileOffset, and the prototype field + * declarations (`Float`, `Integer`, `ScaledInteger`) that describe the + * binary record layout. 
+ */ + +import { + childByName, + childrenByName, + parseXml, + textChild, +} from '../xml-mini.js'; + +export interface PrototypeField { + name: string; + kind: 'Float' | 'ScaledInteger' | 'Integer'; + precision?: 'single' | 'double'; + scale?: number; + offset?: number; + minimum?: number; + maximum?: number; +} + +/** + * Per-scan pose: rotation (unit quaternion w + xi + yj + zk) + + * translation (in source-frame metres). Optional because most + * single-scan exports don't need one — when absent we treat the + * scan as already in the file's global frame (identity pose). + */ +export interface E57Pose { + rotation: { w: number; x: number; y: number; z: number }; + translation: { x: number; y: number; z: number }; +} + +export interface Data3DEntry { + guid: string; + name?: string; + recordCount: number; + /** Logical offset into the file where the binary section begins. */ + binaryFileOffset: number; + /** Field declarations in record order. */ + prototype: PrototypeField[]; + /** + * Per-Data3D pose that places the scan into the file's global + * frame. Applied by `decodeE57` before merging multi-scan files; + * single-scan files where the pose is identity / absent are no-ops. + */ + pose?: E57Pose; +} + +/** + * Parse the E57 XML section. + * + * Uses our own minimal SAX-style parser (`xml-mini.ts`) instead of + * `DOMParser` because dedicated Web Workers — where the decode runs — + * don't expose DOMParser. The shape we need (e57Root → data3D → + * vectorChild → prototype) is shallow and attribute-heavy, well within + * the mini parser's scope. 
+ */ +export function parseE57Xml(xmlText: string): Data3DEntry[] { + const root = parseXml(xmlText); + if (root.name !== 'e57Root') { + throw new Error(`E57: XML root is not (saw <${root.name || '?'}>)`); + } + const data3D = childByName(root, 'data3D'); + if (!data3D) return []; + const entries: Data3DEntry[] = []; + for (const scan of childrenByName(data3D, 'vectorChild')) { + const points = childByName(scan, 'points'); + if (!points) continue; + if (points.attrs.get('type') !== 'CompressedVector') { + // Skip non-compressed-vector points (rare). + continue; + } + const fileOffsetAttr = points.attrs.get('fileOffset'); + const recordCountAttr = points.attrs.get('recordCount'); + if (!fileOffsetAttr || !recordCountAttr) continue; + // Reject NaN / negative parses up front. Without this guard a + // malformed XML attribute (e.g. fileOffset="-1" or + // recordCount="bogus") would flow into decodeE57Scan and either + // overrun the bytestream walk or allocate a zero-byte position + // buffer — neither produces a useful diagnostic. + const binaryFileOffset = Number(fileOffsetAttr); + const recordCount = Number(recordCountAttr); + if (!Number.isFinite(binaryFileOffset) || binaryFileOffset < 0) continue; + if (!Number.isFinite(recordCount) || recordCount < 0) continue; + const proto = childByName(points, 'prototype'); + if (!proto) continue; + const fields: PrototypeField[] = []; + for (const f of proto.children) { + const type = f.attrs.get('type') ?? ''; + if (type === 'Float') { + fields.push({ + name: f.name, + kind: 'Float', + precision: f.attrs.get('precision') === 'single' ? 'single' : 'double', + }); + } else if (type === 'ScaledInteger') { + fields.push({ + name: f.name, + kind: 'ScaledInteger', + scale: Number(f.attrs.get('scale') ?? '1'), + offset: Number(f.attrs.get('offset') ?? '0'), + minimum: Number(f.attrs.get('minimum') ?? '0'), + maximum: Number(f.attrs.get('maximum') ?? 
'0'),
+        });
+      } else if (type === 'Integer') {
+        fields.push({
+          name: f.name,
+          kind: 'Integer',
+          minimum: Number(f.attrs.get('minimum') ?? '0'),
+          maximum: Number(f.attrs.get('maximum') ?? '0'),
+        });
+      }
+      // Other types (e.g. String) ignored — never carry point data.
+    }
+    entries.push({
+      guid: textChild(scan, 'guid') ?? '',
+      name: textChild(scan, 'name') ?? undefined,
+      recordCount,
+      binaryFileOffset,
+      prototype: fields,
+      pose: parsePoseElement(childByName(scan, 'pose')) ?? undefined,
+    });
+  }
+  return entries;
+}
+
+/**
+ * Parse a `<pose>` element to a quaternion + translation pair.
+ * Returns null when the element is missing or malformed (any field
+ * unparseable → fall back to identity rather than reject the file).
+ */
+function parsePoseElement(poseEl: ReturnType<typeof childByName>): E57Pose | null {
+  if (!poseEl) return null;
+  const rotation = childByName(poseEl, 'rotation');
+  const translation = childByName(poseEl, 'translation');
+  if (!rotation || !translation) return null;
+  const qw = Number(textChild(rotation, 'w') ?? '1');
+  const qx = Number(textChild(rotation, 'x') ?? '0');
+  const qy = Number(textChild(rotation, 'y') ?? '0');
+  const qz = Number(textChild(rotation, 'z') ?? '0');
+  const tx = Number(textChild(translation, 'x') ?? '0');
+  const ty = Number(textChild(translation, 'y') ?? '0');
+  const tz = Number(textChild(translation, 'z') ?? 
'0'); + if (![qw, qx, qy, qz, tx, ty, tz].every(Number.isFinite)) return null; + return { + rotation: { w: qw, x: qx, y: qy, z: qz }, + translation: { x: tx, y: ty, z: tz }, + }; +} + +export function findField(proto: PrototypeField[], name: string): PrototypeField | undefined { + return proto.find((p) => p.name === name); +} diff --git a/packages/pointcloud/src/formats/e57.test.ts b/packages/pointcloud/src/formats/e57.test.ts index afbd18674..90640f7e1 100644 --- a/packages/pointcloud/src/formats/e57.test.ts +++ b/packages/pointcloud/src/formats/e57.test.ts @@ -165,18 +165,146 @@ describe('decodeE57Scan (uncompressed Float64)', () => { expect(chunk.bbox).toEqual({ min: [1.5, 2.5, -3.5], max: [7.0, 8.0, 9.0] }); }); - it('throws clearly when prototype uses ScaledInteger for cartesian fields', () => { + it('accepts a fully-packed packet (bytestreams fill the packet exactly)', () => { + // Regression for the false-positive packet-bounds guard that + // assumed a 4-byte trailing CRC inside each DataPacket. CRCs are + // page-level, not packet-level, so real exporters (Faro Focus, + // Leica BLK) emit packets where the last bytestream ends at + // `offset + packetLogicalLength` exactly. With the old guard + // those packets failed with "bytestream X runs past packet payload". + const numPoints = 3; + const lenF64 = numPoints * 8; + const lengths = [lenF64, lenF64, lenF64]; + const totalPayload = lengths.reduce((a, b) => a + b, 0); + const headerBytes = 4 + 2 + 3 * 2; + // No trailing slack: packet = header + payload, bytestreams fill it. 
+ const packetSize = headerBytes + totalPayload; + const buf = new ArrayBuffer(packetSize); + const view = new DataView(buf); + view.setUint8(0, 1); + view.setUint8(1, 0); + view.setUint16(2, packetSize - 1, true); + view.setUint16(4, 3, true); + for (let i = 0; i < 3; i++) view.setUint16(6 + i * 2, lengths[i], true); + let cursor = headerBytes; + for (let i = 0; i < numPoints; i++) view.setFloat64(cursor + i * 8, i + 1, true); + cursor += lenF64; + for (let i = 0; i < numPoints; i++) view.setFloat64(cursor + i * 8, i + 10, true); + cursor += lenF64; + for (let i = 0; i < numPoints; i++) view.setFloat64(cursor + i * 8, i + 100, true); + const entry: Data3DEntry = { guid: 'test', - recordCount: 0, + recordCount: numPoints, binaryFileOffset: 0, prototype: [ - { name: 'cartesianX', kind: 'ScaledInteger', scale: 0.001, offset: 0, minimum: 0, maximum: 1 }, + { name: 'cartesianX', kind: 'Float', precision: 'double' }, { name: 'cartesianY', kind: 'Float', precision: 'double' }, { name: 'cartesianZ', kind: 'Float', precision: 'double' }, ], }; - expect(() => decodeE57Scan(new Uint8Array(0), entry)).toThrow(/ScaledInteger/); + const chunk = decodeE57Scan(new Uint8Array(buf), entry); + expect(chunk.pointCount).toBe(3); + expect(Array.from(chunk.positions)).toEqual([1, 10, 100, 2, 11, 101, 3, 12, 102]); + }); + + it('decodes ScaledInteger cartesian streams (bit-packed integer codec)', () => { + // Synthetic 2-point packet, bitsPerRecord=8 per axis (span = 255): + // minimum=-100, maximum=155, scale=0.01, offset=0 + // bitsPerRecord = ceil(log2(255 - (-100) + 1)) = ceil(log2(256)) = 8 + // For each point we pack `raw_int = original - minimum` into the + // bytestream; decoded float = (raw_int + minimum) * scale + offset. 
+ const buf = new ArrayBuffer(22); + const view = new DataView(buf); + const bytes = new Uint8Array(buf); + + view.setUint8(0, 1); // packetType = data + view.setUint8(1, 0); // flags + view.setUint16(2, 21, true); // packetLogicalLength - 1 (total = 22) + view.setUint16(4, 3, true); // bytestreamCount + view.setUint16(6, 2, true); // X bytestream length + view.setUint16(8, 2, true); // Y bytestream length + view.setUint16(10, 2, true); // Z bytestream length + + // X: point0 raw=50 (→ −0.5), point1 raw=100 (→ 0.0) + bytes[12] = 50; + bytes[13] = 100; + // Y: point0 raw=110 (→ 0.10), point1 raw=120 (→ 0.20) + bytes[14] = 110; + bytes[15] = 120; + // Z: point0 raw=200 (→ 1.00), point1 raw=255 (→ 1.55) + bytes[16] = 200; + bytes[17] = 255; + // bytes[18..21] = trailing 4-byte CRC (ignored) + + const entry: Data3DEntry = { + guid: 'test', + recordCount: 2, + binaryFileOffset: 0, + prototype: [ + { name: 'cartesianX', kind: 'ScaledInteger', scale: 0.01, offset: 0, minimum: -100, maximum: 155 }, + { name: 'cartesianY', kind: 'ScaledInteger', scale: 0.01, offset: 0, minimum: -100, maximum: 155 }, + { name: 'cartesianZ', kind: 'ScaledInteger', scale: 0.01, offset: 0, minimum: -100, maximum: 155 }, + ], + }; + const chunk = decodeE57Scan(bytes, entry); + expect(chunk.pointCount).toBe(2); + expect(chunk.positions[0]).toBeCloseTo(-0.5, 5); + expect(chunk.positions[1]).toBeCloseTo(0.10, 5); + expect(chunk.positions[2]).toBeCloseTo(1.00, 5); + expect(chunk.positions[3]).toBeCloseTo(0.0, 5); + expect(chunk.positions[4]).toBeCloseTo(0.20, 5); + expect(chunk.positions[5]).toBeCloseTo(1.55, 5); + }); + + it('decodes ScaledInteger streams with bitsPerRecord that crosses byte boundaries', () => { + // bitsPerRecord = 12 for X (min=0, max=4095). 
Two 12-bit values + // pack into 3 bytes LSB-first: [0xABC, 0xDEF] → [0xBC, 0xFA, 0xDE] + // byte 0 = value0 & 0xFF = 0xBC + // byte 1 = (value0 >> 8) | ((value1 & 0xF) << 4) = 0xA | 0xF0 = 0xFA + // byte 2 = value1 >> 4 = 0xDE + // Y and Z use 4-bit packing (two values per byte) to keep the + // packet compact. Three bytestreams are required because + // decodeE57Scan demands all three cartesian axes. + const fullLen = 4 + 2 + 2*3 + 3 + 1 + 1 + 4; + const fullBuf = new ArrayBuffer(fullLen); + const fv = new DataView(fullBuf); + const fb = new Uint8Array(fullBuf); + fv.setUint8(0, 1); + fv.setUint8(1, 0); + fv.setUint16(2, fullLen - 1, true); + fv.setUint16(4, 3, true); + fv.setUint16(6, 3, true); // X length (3 bytes for 2×12-bit values) + fv.setUint16(8, 1, true); // Y length (1 byte, bitsPerRecord=4 covers 2 values) + fv.setUint16(10, 1, true); // Z length + fb[12] = 0xBC; + fb[13] = 0xFA; + fb[14] = 0xDE; + fb[15] = 0x32; // Y: low nibble = 2, high nibble = 3 (LSB first) + fb[16] = 0x54; // Z: low nibble = 4, high nibble = 5 + + const entry: Data3DEntry = { + guid: 'test', + recordCount: 2, + binaryFileOffset: 0, + prototype: [ + // X: 12-bit, raw bytes pack [0xBC, 0xFA, 0xDE] → [0xABC, 0xDEF] + { name: 'cartesianX', kind: 'ScaledInteger', scale: 1, offset: 0, minimum: 0, maximum: 4095 }, + // Y: 4-bit, raw [0x2, 0x3] + { name: 'cartesianY', kind: 'ScaledInteger', scale: 1, offset: 0, minimum: 0, maximum: 15 }, + // Z: 4-bit, raw [0x4, 0x5] + { name: 'cartesianZ', kind: 'ScaledInteger', scale: 1, offset: 0, minimum: 0, maximum: 15 }, + ], + }; + const chunk = decodeE57Scan(fb, entry); + expect(chunk.pointCount).toBe(2); + // (raw + minimum) * scale + offset, with min=0 scale=1 offset=0 → raw + expect(chunk.positions[0]).toBe(0xABC); + expect(chunk.positions[1]).toBe(0x2); + expect(chunk.positions[2]).toBe(0x4); + expect(chunk.positions[3]).toBe(0xDEF); + expect(chunk.positions[4]).toBe(0x3); + expect(chunk.positions[5]).toBe(0x5); }); }); @@ -239,7 +367,7 @@ 
describe('parseE57Xml (worker-safe; no DOMParser dependency)', () => { expect(() => parseE57Xml('')).toThrow(/e57Root/); }); - it('flags scans that carry a child so multi-scan rejection can fire', () => { + it('extracts rotation + translation when present', () => { const xml = ` @@ -250,8 +378,8 @@ describe('parseE57Xml (worker-safe; no DOMParser dependency)', () => { - 1000 - 1000 + 0.7071067811865476000.7071067811865476 + 103.5-2 @@ -265,8 +393,42 @@ describe('parseE57Xml (worker-safe; no DOMParser dependency)', () => { `; const entries = parseE57Xml(xml); expect(entries).toHaveLength(2); - expect(entries[0].hasPose).toBe(true); - expect(entries[1].hasPose).toBe(false); + expect(entries[0].pose).toBeDefined(); + expect(entries[0].pose!.rotation.w).toBeCloseTo(0.7071, 3); + expect(entries[0].pose!.rotation.z).toBeCloseTo(0.7071, 3); + expect(entries[0].pose!.translation.x).toBe(10); + expect(entries[0].pose!.translation.y).toBe(3.5); + expect(entries[0].pose!.translation.z).toBe(-2); + expect(entries[1].pose).toBeUndefined(); + }); +}); + +describe('applyPoseInPlace', () => { + it('rotates 90° around Z + translates per the unit-quaternion convention', async () => { + const { applyPoseInPlace } = await import('./e57.js'); + // 90° rotation around +Z: q = (cos(45°), 0, 0, sin(45°)) + // (1, 0, 0) → (0, 1, 0); then translate (10, 0, 0) → (10, 1, 0) + const positions = new Float32Array([1, 0, 0, 0, 1, 0, 0, 0, 1]); + applyPoseInPlace(positions, 3, { + rotation: { w: Math.SQRT1_2, x: 0, y: 0, z: Math.SQRT1_2 }, + translation: { x: 10, y: 0, z: 0 }, + }); + // Float32 lossy → use closeTo + expect(positions[0]).toBeCloseTo(10, 5); expect(positions[1]).toBeCloseTo(1, 5); expect(positions[2]).toBeCloseTo(0, 5); + expect(positions[3]).toBeCloseTo(9, 5); expect(positions[4]).toBeCloseTo(0, 5); expect(positions[5]).toBeCloseTo(0, 5); + expect(positions[6]).toBeCloseTo(10, 5); expect(positions[7]).toBeCloseTo(0, 5); expect(positions[8]).toBeCloseTo(1, 5); + }); + + 
it('identity quaternion + zero translation is a no-op', async () => { + const { applyPoseInPlace } = await import('./e57.js'); + const positions = new Float32Array([1.5, 2.5, 3.5]); + applyPoseInPlace(positions, 1, { + rotation: { w: 1, x: 0, y: 0, z: 0 }, + translation: { x: 0, y: 0, z: 0 }, + }); + expect(positions[0]).toBeCloseTo(1.5, 5); + expect(positions[1]).toBeCloseTo(2.5, 5); + expect(positions[2]).toBeCloseTo(3.5, 5); }); }); diff --git a/packages/pointcloud/src/formats/e57.ts b/packages/pointcloud/src/formats/e57.ts index 5205c4886..1cea1dc6f 100644 --- a/packages/pointcloud/src/formats/e57.ts +++ b/packages/pointcloud/src/formats/e57.ts @@ -3,535 +3,37 @@ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ /** - * E57 (ASTM E2807-11) reader — point cloud subset. + * E57 (ASTM E2807-11) reader — top-level orchestrator. * - * Scope: - * - File header (48 bytes) — magic + xmlPhysicalOffset/Length + pageSize. - * - Physical → logical view: every 1024-byte page ends with a 4-byte - * CRC32-C; we strip those to get the logical byte stream the XML + - * binary indices reference. CRCs are NOT validated (faster + still - * correct for well-formed files). - * - XML parsed with DOMParser to find Data3D entries with FloatNodes - * for cartesianX / cartesianY / cartesianZ and optional colorRed / - * colorGreen / colorBlue / intensity. - * - Binary section: walk DataPackets at the prototype's CompressedVector - * fileOffset, decode bytestreams as raw Float32 / Float64 columns. + * Pulls the file-header / page-CRC handling from `e57-page.ts`, the + * XML model from `e57-xml.ts`, and the per-scan binary decoder from + * `e57-decode.ts`. Re-exports the public surface so existing callers + * (`@ifc-lite/pointcloud` index, the streaming source, tests) keep + * working. 
* - * What we don't yet support: - * - ScaledIntegerNode encoding (bit-packed integers with scale/offset) - * — this is the more compact format; we throw a clear error so the - * caller can guide the user to a Float-encoded export. - * - Spherical coordinates (most files use cartesian). - * - Per-scan pose transforms — points come back in scan-local space. + * Scope: + * - Single-scan + multi-scan files. Per-Data3D pose (quaternion + + * translation) is applied before merging so registered scans + * line up in the file's global frame. + * - Float (single/double) AND ScaledInteger (bit-packed integer + * with scale/offset per E57 §6.3.4) for cartesian fields. + * - Integer / Float / ScaledInteger colour + intensity channels. * - * That subset still covers a large fraction of real-world E57 files - * (Faro, Leica, Trimble, generic exports) and gives a clean error - * message on the rest. + * Out of scope (deferred — see issue #611): + * - Spherical coordinate prototypes. */ -import type { DecodedPointChunk, PointCloudBBox } from '../types.js'; +import type { DecodedPointChunk } from '../types.js'; import { - childByName, - childrenByName, - parseXml, - textChild, -} from '../xml-mini.js'; - -const E57_MAGIC = 'ASTM-E57'; - -export interface E57FileHeader { - majorVersion: number; - minorVersion: number; - fileLogicalSize: number; - xmlLogicalOffset: number; - xmlLogicalLength: number; - pageSize: number; -} - -/** Read the 48-byte FileHeader. Throws on bad magic. 
*/ -export function parseE57FileHeader(bytes: Uint8Array): E57FileHeader { - if (bytes.length < 48) throw new Error('E57: header truncated (need 48 bytes)'); - const magic = String.fromCharCode(...bytes.subarray(0, 8)); - if (magic !== E57_MAGIC) { - throw new Error(`E57: bad magic "${magic}" (expected "${E57_MAGIC}")`); - } - const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength); - return { - majorVersion: view.getUint32(8, true), - minorVersion: view.getUint32(12, true), - fileLogicalSize: readU64LE(view, 16), - // Physical XML offset → we convert to logical below; xmlLogicalLength - // is the byte length AFTER stripping page CRCs. - xmlLogicalOffset: physicalToLogical(readU64LE(view, 24), readU64LE(view, 40)), - xmlLogicalLength: readU64LE(view, 32), - pageSize: readU64LE(view, 40), - }; -} - -/** - * Strip the 4-byte CRC tail from each `pageSize`-byte physical page. - * - * Returns a freshly-allocated buffer of "logical" bytes — the form that - * XML offsets and CompressedVector data offsets reference. - * - * `pageSize` is read from the header and is conventionally 1024. - */ -export function stripPageCrc(bytes: Uint8Array, pageSize: number): Uint8Array { - if (pageSize <= 4) throw new Error('E57: pageSize too small'); - const payloadPerPage = pageSize - 4; - const fullPages = Math.floor(bytes.length / pageSize); - const tail = bytes.length - fullPages * pageSize; - // Trailing partial page (if any) still carries 4 CRC bytes when complete; - // when the file ends mid-page we can't trust those tail bytes, so we - // stop at the last complete page boundary. 
- const out = new Uint8Array(fullPages * payloadPerPage + Math.max(0, tail - 4)); - let dst = 0; - for (let p = 0; p < fullPages; p++) { - const src = p * pageSize; - out.set(bytes.subarray(src, src + payloadPerPage), dst); - dst += payloadPerPage; - } - if (tail > 4) { - const src = fullPages * pageSize; - out.set(bytes.subarray(src, src + tail - 4), dst); - } - return out; -} - -/** Convert a physical (CRC-paged) offset to the equivalent logical offset. */ -function physicalToLogical(physical: number, pageSize: number): number { - const payloadPerPage = pageSize - 4; - const pages = Math.floor(physical / pageSize); - const within = physical - pages * pageSize; - return pages * payloadPerPage + within; -} - -/** - * Read a CompressedVector binary-section header (E57 spec §6.4.2) and - * return the LOGICAL byte offset where its DataPackets actually start. - * - * Layout (32 bytes): - * [ 0] u8 sectionId (must == 1 for CompressedVector) - * [ 1] u8[7] reserved - * [ 8] u64 LE sectionLogicalLength - * [16] u64 LE dataPhysicalOffset - * [24] u64 LE indexPhysicalOffset - * - * The XML's `points@fileOffset` points at this section header — NOT at - * the first DataPacket. Reading packets straight at `fileOffset` puts - * the parser ~32 bytes off and the first u16 it reads is the low half - * of `sectionLogicalLength`, which usually decodes as a bytestreamCount - * of 0 (matched the user-reported `bytestreamCount (0) ≠ prototype - * length (7)` error exactly). 
- */ -export function resolveCompressedVectorDataOffset( - logical: Uint8Array, - physicalSectionOffset: number, - pageSize: number, -): number { - const sectionLogical = physicalToLogical(physicalSectionOffset, pageSize); - if (sectionLogical + 32 > logical.length) { - throw new Error( - `E57: CompressedVector section header at logical ${sectionLogical} runs past end of file (length ${logical.length})`, - ); - } - const view = new DataView(logical.buffer, logical.byteOffset + sectionLogical, 32); - const sectionId = view.getUint8(0); - if (sectionId !== 1) { - throw new Error( - `E57: expected CompressedVector section (id=1) at physical ${physicalSectionOffset}, got id=${sectionId}`, - ); - } - const dataPhysicalOffset = readU64LE(view, 16); - return physicalToLogical(dataPhysicalOffset, pageSize); -} - -// ─── XML model ────────────────────────────────────────────────────────────── - -interface PrototypeField { - name: string; - kind: 'Float' | 'ScaledInteger' | 'Integer'; - precision?: 'single' | 'double'; - scale?: number; - offset?: number; - minimum?: number; - maximum?: number; -} - -export interface Data3DEntry { - guid: string; - name?: string; - recordCount: number; - /** Logical offset into the file where the binary section begins. */ - binaryFileOffset: number; - /** Field declarations in record order. */ - prototype: PrototypeField[]; - /** - * Whether this Data3D defines a `pose` element (translation + - * rotation that places the scan in the file's global frame). We - * don't apply the transform yet — single-scan files don't need it, - * and multi-scan files with poses are rejected upfront so we never - * silently merge in scan-local space. 
- */ - hasPose?: boolean; -} + parseE57FileHeader, + resolveCompressedVectorDataOffset, + stripPageCrc, +} from './e57-page.js'; +import { parseE57Xml, type E57Pose } from './e57-xml.js'; +import { computeBBox, decodeE57Scan } from './e57-decode.js'; const TEXT_DECODER = new TextDecoder(); -/** - * Parse the E57 XML section. - * - * Uses our own minimal SAX-style parser (`xml-mini.ts`) instead of - * `DOMParser` because dedicated Web Workers — where the decode runs — - * don't expose DOMParser. The shape we need (e57Root → data3D → - * vectorChild → prototype) is shallow and attribute-heavy, well within - * the mini parser's scope. - */ -export function parseE57Xml(xmlText: string): Data3DEntry[] { - const root = parseXml(xmlText); - if (root.name !== 'e57Root') { - throw new Error(`E57: XML root is not (saw <${root.name || '?'}>)`); - } - const data3D = childByName(root, 'data3D'); - if (!data3D) return []; - const entries: Data3DEntry[] = []; - for (const scan of childrenByName(data3D, 'vectorChild')) { - const points = childByName(scan, 'points'); - if (!points) continue; - if (points.attrs.get('type') !== 'CompressedVector') { - // Skip non-compressed-vector points (rare). - continue; - } - const fileOffsetAttr = points.attrs.get('fileOffset'); - const recordCountAttr = points.attrs.get('recordCount'); - if (!fileOffsetAttr || !recordCountAttr) continue; - const proto = childByName(points, 'prototype'); - if (!proto) continue; - const fields: PrototypeField[] = []; - for (const f of proto.children) { - const type = f.attrs.get('type') ?? ''; - if (type === 'Float') { - fields.push({ - name: f.name, - kind: 'Float', - precision: f.attrs.get('precision') === 'single' ? 'single' : 'double', - }); - } else if (type === 'ScaledInteger') { - fields.push({ - name: f.name, - kind: 'ScaledInteger', - scale: Number(f.attrs.get('scale') ?? '1'), - offset: Number(f.attrs.get('offset') ?? '0'), - minimum: Number(f.attrs.get('minimum') ?? 
'0'), - maximum: Number(f.attrs.get('maximum') ?? '0'), - }); - } else if (type === 'Integer') { - fields.push({ - name: f.name, - kind: 'Integer', - minimum: Number(f.attrs.get('minimum') ?? '0'), - maximum: Number(f.attrs.get('maximum') ?? '0'), - }); - } - // Other types (e.g. String) ignored — never carry point data. - } - entries.push({ - guid: textChild(scan, 'guid') ?? '', - name: textChild(scan, 'name') ?? undefined, - recordCount: Number(recordCountAttr), - binaryFileOffset: Number(fileOffsetAttr), - prototype: fields, - hasPose: childByName(scan, 'pose') !== null, - }); - } - return entries; -} - -// ─── binary decode ────────────────────────────────────────────────────────── - -/** - * Decode the binary section starting at `entry.binaryFileOffset` in the - * logical-bytes view. NOTE: `binaryFileOffset` here must already point - * at the first DataPacket (i.e. AFTER the 32-byte CompressedVector - * section header) — `decodeE57` does this conversion via - * `resolveCompressedVectorDataOffset`. Callers passing the raw XML - * offset directly will see a "bytestreamCount ≠ prototype length" - * mismatch. - * - * Returns one DecodedPointChunk per scan; caller can concatenate or - * emit them as separate streaming chunks. - * - * Limitations (Phase-1 E57): - * - Only Float (single/double) prototype fields are decoded. Files - * using ScaledInteger throw a clear error so the host can fall back - * gracefully. - * - Reads only cartesianX/Y/Z + colorRed/Green/Blue + intensity when - * present. Other fields are honoured for stride math but discarded. 
- */ -export function decodeE57Scan(logical: Uint8Array, entry: Data3DEntry): DecodedPointChunk { - const xField = findField(entry.prototype, 'cartesianX'); - const yField = findField(entry.prototype, 'cartesianY'); - const zField = findField(entry.prototype, 'cartesianZ'); - if (!xField || !yField || !zField) { - throw new Error('E57: prototype missing cartesianX/Y/Z'); - } - for (const f of [xField, yField, zField]) { - if (f.kind !== 'Float') { - throw new Error( - `E57: cartesianX/Y/Z encoded as ${f.kind} (only Float supported in this build)`, - ); - } - } - const rField = findField(entry.prototype, 'colorRed'); - const gField = findField(entry.prototype, 'colorGreen'); - const bField = findField(entry.prototype, 'colorBlue'); - const hasRgb = !!(rField && gField && bField); - const iField = findField(entry.prototype, 'intensity'); - // Bit-packed (ScaledInteger) intensity isn't supported yet — surface - // the limitation explicitly rather than silently dropping it. - if (iField && iField.kind === 'ScaledInteger') { - throw new Error( - 'E57: intensity encoded as ScaledInteger (bit-packed integer codec not yet supported)', - ); - } - - const positions = new Float32Array(entry.recordCount * 3); - const colors = hasRgb ? new Float32Array(entry.recordCount * 3) : undefined; - // Allocate intensity buffer for both Float and Integer kinds — only - // ScaledInteger is unsupported (rejected above). Otherwise - // Integer-encoded intensity (common with u16-range producers) was - // silently dropped. - const intensities = iField && (iField.kind === 'Float' || iField.kind === 'Integer') - ? new Uint16Array(entry.recordCount) - : undefined; - - // Walk DataPackets starting at binaryFileOffset. 
- // Packet header (4 bytes): - // byte 0: packetType (1=data, 2=index, 3=empty) - // byte 1: packetFlags (bit 0 = compressorRestart) - // bytes 2..3: packetLogicalLength - 1 (LE u16; total packet bytes minus 1) - // Followed by per-bytestream sections, then 4-byte CRC at the end of - // each packet (already part of the page-level CRC strip — packet CRCs - // sit in the LOGICAL stream and we ignore them here for speed). - let offset = entry.binaryFileOffset; - const view = new DataView(logical.buffer, logical.byteOffset, logical.byteLength); - let written = 0; - - while (written < entry.recordCount && offset < logical.length) { - if (offset + 4 > logical.length) { - throw new Error('E57: truncated DataPacket header'); - } - const packetType = view.getUint8(offset); - // packetFlags = view.getUint8(offset + 1) // unused for plain data - const packetLogicalLength = view.getUint16(offset + 2, true) + 1; - if (packetType !== 1) { - // Skip non-data packets (index/empty); they may appear interleaved. - offset += packetLogicalLength; - continue; - } - const packetEnd = offset + packetLogicalLength; - if (packetEnd > logical.length) { - throw new Error('E57: DataPacket runs past end of logical bytes'); - } - // Data packet header beyond the common 4 bytes: - // byte 4..5: bytestreamCount (u16 LE) - // then `bytestreamCount` × u16 LE = bytestreamByteCount[] - // then payload (concatenated bytestreams, in prototype order) - const bytestreamCount = view.getUint16(offset + 4, true); - if (bytestreamCount !== entry.prototype.length) { - throw new Error( - `E57: packet bytestreamCount (${bytestreamCount}) ≠ prototype length (${entry.prototype.length})`, - ); - } - const bytestreamLengths: number[] = []; - let cursor = offset + 6; - for (let i = 0; i < bytestreamCount; i++) { - bytestreamLengths.push(view.getUint16(cursor, true)); - cursor += 2; - } - // CRC at packet tail (4 bytes) — ignored. 
- const packetPointsBefore = written; - const fieldOffsets = new Map(); - let streamCursor = cursor; - for (let i = 0; i < bytestreamCount; i++) { - fieldOffsets.set(entry.prototype[i].name, { start: streamCursor, length: bytestreamLengths[i] }); - streamCursor += bytestreamLengths[i]; - } - - // Decode this packet's points - const xByteSize = xField.precision === 'single' ? 4 : 8; - const yByteSize = yField.precision === 'single' ? 4 : 8; - const zByteSize = zField.precision === 'single' ? 4 : 8; - const pointsInPacket = Math.floor((fieldOffsets.get('cartesianX')!.length) / xByteSize); - if ( - pointsInPacket !== Math.floor(fieldOffsets.get('cartesianY')!.length / yByteSize) - || pointsInPacket !== Math.floor(fieldOffsets.get('cartesianZ')!.length / zByteSize) - ) { - throw new Error('E57: cartesianX/Y/Z bytestream lengths disagree on point count'); - } - const take = Math.min(pointsInPacket, entry.recordCount - written); - - const xStart = fieldOffsets.get('cartesianX')!.start; - const yStart = fieldOffsets.get('cartesianY')!.start; - const zStart = fieldOffsets.get('cartesianZ')!.start; - - if (xField.precision === 'single') { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3] = view.getFloat32(xStart + i * 4, true); - } - } else { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3] = view.getFloat64(xStart + i * 8, true); - } - } - if (yField.precision === 'single') { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3 + 1] = view.getFloat32(yStart + i * 4, true); - } - } else { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3 + 1] = view.getFloat64(yStart + i * 8, true); - } - } - if (zField.precision === 'single') { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3 + 2] = view.getFloat32(zStart + i * 4, true); - } - } else { - for (let i = 0; i < take; i++) { - positions[(written + i) * 3 + 2] = view.getFloat64(zStart + i * 8, true); - } - } - - if (colors && rField && gField && bField) 
{ - writeColorChannel(view, fieldOffsets.get('colorRed')!.start, rField, colors, written, take, 0); - writeColorChannel(view, fieldOffsets.get('colorGreen')!.start, gField, colors, written, take, 1); - writeColorChannel(view, fieldOffsets.get('colorBlue')!.start, bField, colors, written, take, 2); - } - if (intensities && iField) { - const iStart = fieldOffsets.get('intensity')!.start; - if (iField.kind === 'Float') { - const stride = iField.precision === 'single' ? 4 : 8; - for (let i = 0; i < take; i++) { - const v = stride === 4 ? view.getFloat32(iStart + i * stride, true) : view.getFloat64(iStart + i * stride, true); - intensities[written + i] = Math.min(65535, Math.max(0, Math.round(v * 65535))); - } - } else { - // Integer-encoded intensity — pick element width from declared - // range (same logic as the integer color channels). - const min = iField.minimum ?? 0; - const max = iField.maximum ?? 65535; - const span = max - min; - const inv = span > 0 ? 1 / span : 1; - const widest = Math.max(Math.abs(min), Math.abs(max)); - const stride = widest > 255 ? 2 : 1; - const signed = min < 0; - for (let i = 0; i < take; i++) { - const off = iStart + i * stride; - const raw = stride === 2 - ? (signed ? view.getInt16(off, true) : view.getUint16(off, true)) - : (signed ? view.getInt8(off) : view.getUint8(off)); - const norm = (raw - min) * inv; - intensities[written + i] = Math.min(65535, Math.max(0, Math.round(norm * 65535))); - } - } - } - - written += take; - void packetPointsBefore; - offset = packetEnd; - } - - if (written < entry.recordCount) { - // Real-world files sometimes report counts a few records higher - // than what's actually stored; trim positions to the actual count - // so downstream code doesn't see uninitialised tail values. 
- return finalize(positions.subarray(0, written * 3), colors?.subarray(0, written * 3), intensities?.subarray(0, written), written); - } - return finalize(positions, colors, intensities, entry.recordCount); -} - -function writeColorChannel( - view: DataView, - start: number, - field: PrototypeField, - colors: Float32Array, - written: number, - take: number, - channelOffset: 0 | 1 | 2, -): void { - if (field.kind === 'Float') { - const stride = field.precision === 'single' ? 4 : 8; - for (let i = 0; i < take; i++) { - const v = stride === 4 ? view.getFloat32(start + i * stride, true) : view.getFloat64(start + i * stride, true); - colors[(written + i) * 3 + channelOffset] = clamp01(v); - } - } else if (field.kind === 'Integer') { - // Pick element width from the declared range. E57 producers use - // either u8 (0..255 — most common) or u16 (0..65535). Both - // appear in real files; assuming u8 distorts u16-encoded colors. - const min = field.minimum ?? 0; - const max = field.maximum ?? 255; - const span = max - min; - const inv = span > 0 ? 1 / span : 1; - const widest = Math.max(Math.abs(min), Math.abs(max)); - const stride = widest > 255 ? 2 : 1; - const signed = min < 0; - for (let i = 0; i < take; i++) { - const off = start + i * stride; - const raw = stride === 2 - ? (signed ? view.getInt16(off, true) : view.getUint16(off, true)) - : (signed ? view.getInt8(off) : view.getUint8(off)); - colors[(written + i) * 3 + channelOffset] = clamp01((raw - min) * inv); - } - } else { - throw new Error('E57: ScaledInteger color encoding not yet supported'); - } -} - -function finalize( - positions: Float32Array, - colors: Float32Array | undefined, - intensities: Uint16Array | undefined, - pointCount: number, -): DecodedPointChunk { - return { - positions: new Float32Array(positions), - colors: colors ? new Float32Array(colors) : undefined, - intensities: intensities ? 
new Uint16Array(intensities) : undefined, - pointCount, - bbox: computeBBox(positions), - }; -} - -function findField(proto: PrototypeField[], name: string): PrototypeField | undefined { - return proto.find((p) => p.name === name); -} - -function readU64LE(view: DataView, offset: number): number { - const lo = view.getUint32(offset, true); - const hi = view.getUint32(offset + 4, true); - return hi * 0x100000000 + lo; -} - -function clamp01(v: number): number { - return v < 0 ? 0 : v > 1 ? 1 : v; -} - -function computeBBox(positions: Float32Array): PointCloudBBox { - let minX = Infinity, minY = Infinity, minZ = Infinity; - let maxX = -Infinity, maxY = -Infinity, maxZ = -Infinity; - for (let i = 0; i < positions.length; i += 3) { - const x = positions[i], y = positions[i + 1], z = positions[i + 2]; - if (x < minX) minX = x; if (x > maxX) maxX = x; - if (y < minY) minY = y; if (y > maxY) maxY = y; - if (z < minZ) minZ = z; if (z > maxZ) maxZ = z; - } - return { min: [minX, minY, minZ], max: [maxX, maxY, maxZ] }; -} - -// ─── high-level entry ─────────────────────────────────────────────────────── - /** * Decode all Data3D scans in an E57 file. Combines them into a single * DecodedPointChunk (positions concatenated). Returns null when the @@ -545,36 +47,31 @@ export function decodeE57(bytes: Uint8Array): DecodedPointChunk | null { const entries = parseE57Xml(xmlText); if (entries.length === 0) return null; - // Multi-scan registered E57 files store each scan in its own local - // frame and rely on the per-Data3D `pose` (rotation + translation) to - // place them in the file's global frame. We don't apply that - // transform yet, so silently concatenating registered multi-scan - // files would produce a misaligned mess. Reject upfront with a - // clear error so the user can use the export-merged option in their - // scan-processing tool. 
- if (entries.length > 1 && entries.some((e) => e.hasPose)) { - throw new Error( - `E57: file contains ${entries.length} scans with per-scan poses (registered multi-scan). ` - + 'Multi-scan pose merging is not yet supported — please re-export as a single merged scan.', - ); - } - // Resolve every entry's binary file offset through the // CompressedVector section header. The XML's fileOffset is the // section header (physical), not the first DataPacket. + // Per-Data3D pose (when present) places each scan in the file's + // global frame: `global = R * local + T`. We apply it after + // decoding but before merging, so multi-scan registered E57s line + // up correctly. Identity / absent poses are no-ops. const chunks = entries.map((entry) => { const dataLogicalOffset = resolveCompressedVectorDataOffset( logical, entry.binaryFileOffset, header.pageSize, ); - return decodeE57Scan(logical, { ...entry, binaryFileOffset: dataLogicalOffset }); + const chunk = decodeE57Scan(logical, { ...entry, binaryFileOffset: dataLogicalOffset }); + if (entry.pose) { + applyPoseInPlace(chunk.positions, chunk.pointCount, entry.pose); + chunk.bbox = computeBBox(chunk.positions); + } + return chunk; }); if (chunks.length === 1) return chunks[0]; - // Concatenate. Use some() so a single scan that lacks color/intensity - // doesn't drop the channel for the whole merged cloud — we just leave - // its slice at the default zeros and emit the channel anyway. + // Concatenate. `some()` checks per channel so a single scan that + // lacks color/intensity doesn't drop the channel for the whole + // merged cloud — we just leave its slice at the default zeros. 
let total = 0; for (const c of chunks) total += c.pointCount; const positions = new Float32Array(total * 3); @@ -585,10 +82,6 @@ export function decodeE57(bytes: Uint8Array): DecodedPointChunk | null { let off = 0; for (const c of chunks) { positions.set(c.positions, off * 3); - // Per-chunk conditional set: chunks without a channel just leave - // their slice at the default zero, which renders as black for - // colors / unlit for intensity. Better than dropping the whole - // channel because of a single mixed-attribute file. if (colors && c.colors) colors.set(c.colors, off * 3); if (intensities && c.intensities) intensities.set(c.intensities, off); off += c.pointCount; @@ -601,3 +94,53 @@ export function decodeE57(bytes: Uint8Array): DecodedPointChunk | null { bbox: computeBBox(positions), }; } + +/** + * Apply a per-scan pose (rotation quaternion + translation) to a + * positions buffer in place: `out = R · in + T`. + * + * Quaternion is in Hamilton convention (w + xi + yj + zk); we derive + * the 3×3 rotation matrix once and reuse across every point in the + * chunk. Translation is added after rotation. 
+ */ +export function applyPoseInPlace( + positions: Float32Array, + pointCount: number, + pose: E57Pose, +): void { + const { w, x, y, z } = pose.rotation; + const tx = pose.translation.x; + const ty = pose.translation.y; + const tz = pose.translation.z; + const r00 = 1 - 2 * (y * y + z * z); + const r01 = 2 * (x * y - w * z); + const r02 = 2 * (x * z + w * y); + const r10 = 2 * (x * y + w * z); + const r11 = 1 - 2 * (x * x + z * z); + const r12 = 2 * (y * z - w * x); + const r20 = 2 * (x * z - w * y); + const r21 = 2 * (y * z + w * x); + const r22 = 1 - 2 * (x * x + y * y); + for (let i = 0; i < pointCount; i++) { + const px = positions[i * 3]; + const py = positions[i * 3 + 1]; + const pz = positions[i * 3 + 2]; + positions[i * 3] = r00 * px + r01 * py + r02 * pz + tx; + positions[i * 3 + 1] = r10 * px + r11 * py + r12 * pz + ty; + positions[i * 3 + 2] = r20 * px + r21 * py + r22 * pz + tz; + } +} + +// Re-export the public API so existing imports keep working. +export { + parseE57FileHeader, + stripPageCrc, + resolveCompressedVectorDataOffset, + type E57FileHeader, +} from './e57-page.js'; +export { + parseE57Xml, + type Data3DEntry, + type E57Pose, +} from './e57-xml.js'; +export { decodeE57Scan } from './e57-decode.js'; diff --git a/packages/pointcloud/src/formats/ifcx-points.ts b/packages/pointcloud/src/formats/ifcx-points.ts index 8820b67d0..1202fb7dd 100644 --- a/packages/pointcloud/src/formats/ifcx-points.ts +++ b/packages/pointcloud/src/formats/ifcx-points.ts @@ -144,13 +144,23 @@ function base64ToBytes(b64: string): Uint8Array { } function computeBBox(positions: Float32Array): PointCloudBBox { + // Mirrors the e57.ts guards: empty / non-finite input must produce + // finite zero-bbox rather than ±Infinity, which would poison camera + // fit-to-view and section-plane math. 
+ if (positions.length < 3) { + return { min: [0, 0, 0], max: [0, 0, 0] }; + } let minX = Infinity, minY = Infinity, minZ = Infinity; let maxX = -Infinity, maxY = -Infinity, maxZ = -Infinity; - for (let i = 0; i < positions.length; i += 3) { + let any = false; + for (let i = 0; i + 2 < positions.length; i += 3) { const x = positions[i], y = positions[i + 1], z = positions[i + 2]; + if (!Number.isFinite(x) || !Number.isFinite(y) || !Number.isFinite(z)) continue; + any = true; if (x < minX) minX = x; if (x > maxX) maxX = x; if (y < minY) minY = y; if (y > maxY) maxY = y; if (z < minZ) minZ = z; if (z > maxZ) maxZ = z; } + if (!any) return { min: [0, 0, 0], max: [0, 0, 0] }; return { min: [minX, minY, minZ], max: [maxX, maxY, maxZ] }; } diff --git a/packages/pointcloud/src/index.ts b/packages/pointcloud/src/index.ts index a5dcc191a..24f0ae62f 100644 --- a/packages/pointcloud/src/index.ts +++ b/packages/pointcloud/src/index.ts @@ -47,6 +47,14 @@ export { LazStreamingSource } from './streaming/laz-source.js'; export { PlyStreamingSource } from './streaming/ply-source.js'; export { PcdStreamingSource } from './streaming/pcd-source.js'; export { E57StreamingSource } from './streaming/e57-source.js'; +export { AsciiPointsStreamingSource } from './streaming/ascii-points-source.js'; +export { + decodeAsciiPoints, + decodeAsciiPointsFromText, + probeAsciiPointsLayout, + type AsciiPointsFormat, + type AsciiPointsLayout, +} from './formats/ascii-points.js'; export { parsePlyHeader } from './formats/ply.js'; export { parseE57FileHeader, diff --git a/packages/pointcloud/src/streaming/ascii-points-source.ts b/packages/pointcloud/src/streaming/ascii-points-source.ts new file mode 100644 index 000000000..ec301d89e --- /dev/null +++ b/packages/pointcloud/src/streaming/ascii-points-source.ts @@ -0,0 +1,114 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * PTS / XYZ streaming source — whole-file ASCII decode. + * + * ASCII formats don't support random-access seek by point index, so + * we read the whole blob, decode once, and serve as a single chunk. + * Memory is still bounded by the host's 25M-point cap via the + * `stride` downsample applied here. + * + * The whole-file approach is simpler and matches PLY's behaviour; + * for files past the cap we apply stride downsampling on the way + * out so the GPU upload stays bounded. + */ + +import type { DecodedPointChunk } from '../types.js'; +import { + decodeAsciiPoints, + type AsciiPointsFormat, +} from '../formats/ascii-points.js'; +import type { + DownsampleHint, + PointSourceInfo, + StreamingPointSource, +} from './types.js'; + +export class AsciiPointsStreamingSource implements StreamingPointSource { + private blob: Blob; + private format: AsciiPointsFormat; + private downsample: DownsampleHint; + private label?: string; + private chunk: DecodedPointChunk | null = null; + private served = false; + + constructor( + blob: Blob, + format: AsciiPointsFormat, + options: { label?: string; downsample?: DownsampleHint } = {}, + ) { + this.blob = blob; + this.format = format; + this.downsample = options.downsample ?? 
{ stride: 1 };
+    this.label = options.label;
+  }
+
+  async open(signal?: AbortSignal): Promise<PointSourceInfo> {
+    abortIfAborted(signal);
+    const buf = await this.blob.arrayBuffer();
+    abortIfAborted(signal);
+    const bytes = new Uint8Array(buf);
+    const fullChunk = decodeAsciiPoints(bytes, this.format);
+    this.chunk = applyStride(fullChunk, this.downsample.stride);
+    return {
+      totalPointCount: this.chunk.pointCount,
+      bbox: this.chunk.bbox,
+      hasColor: !!this.chunk.colors,
+      hasClassification: false,
+      hasIntensity: !!this.chunk.intensities,
+      label: this.label,
+    };
+  }
+
+  async next(maxPoints: number, signal?: AbortSignal): Promise<DecodedPointChunk | null> {
+    abortIfAborted(signal);
+    if (!this.chunk || this.served) return null;
+    void maxPoints; // Whole-file decode: ignore chunk-size hint.
+    this.served = true;
+    return this.chunk;
+  }
+
+  close(): void {
+    this.chunk = null;
+    this.served = false;
+  }
+}
+
+function applyStride(chunk: DecodedPointChunk, stride: number): DecodedPointChunk {
+  const s = Math.max(1, stride | 0);
+  if (s === 1) return chunk;
+  const newCount = Math.ceil(chunk.pointCount / s);
+  const positions = new Float32Array(newCount * 3);
+  const colors = chunk.colors ? new Float32Array(newCount * 3) : undefined;
+  const intensities = chunk.intensities ?
new Uint16Array(newCount) : undefined; + let dst = 0; + for (let i = 0; i < chunk.pointCount; i += s) { + positions[dst * 3] = chunk.positions[i * 3]; + positions[dst * 3 + 1] = chunk.positions[i * 3 + 1]; + positions[dst * 3 + 2] = chunk.positions[i * 3 + 2]; + if (colors && chunk.colors) { + colors[dst * 3] = chunk.colors[i * 3]; + colors[dst * 3 + 1] = chunk.colors[i * 3 + 1]; + colors[dst * 3 + 2] = chunk.colors[i * 3 + 2]; + } + if (intensities && chunk.intensities) { + intensities[dst] = chunk.intensities[i]; + } + dst++; + } + return { + positions, + colors, + intensities, + pointCount: newCount, + bbox: chunk.bbox, + }; +} + +function abortIfAborted(signal?: AbortSignal): void { + if (signal?.aborted) { + throw new DOMException('Aborted', 'AbortError'); + } +} diff --git a/packages/pointcloud/src/streaming/decode-worker.ts b/packages/pointcloud/src/streaming/decode-worker.ts index 838bdc72d..a91112292 100644 --- a/packages/pointcloud/src/streaming/decode-worker.ts +++ b/packages/pointcloud/src/streaming/decode-worker.ts @@ -26,6 +26,7 @@ import { LazStreamingSource } from './laz-source.js'; import { PlyStreamingSource } from './ply-source.js'; import { PcdStreamingSource } from './pcd-source.js'; import { E57StreamingSource } from './e57-source.js'; +import { AsciiPointsStreamingSource } from './ascii-points-source.js'; declare const self: DedicatedWorkerGlobalScope; @@ -139,7 +140,7 @@ function handleAbort(sourceId: number): void { } function createSource( - format: 'las' | 'laz' | 'ply' | 'pcd' | 'e57', + format: 'las' | 'laz' | 'ply' | 'pcd' | 'e57' | 'pts' | 'xyz', blob: Blob, opts: { label?: string; downsample: { stride: number } }, ): StreamingPointSource { @@ -148,6 +149,8 @@ function createSource( if (format === 'ply') return new PlyStreamingSource(blob, opts); if (format === 'pcd') return new PcdStreamingSource(blob, opts); if (format === 'e57') return new E57StreamingSource(blob, opts); + if (format === 'pts') return new 
AsciiPointsStreamingSource(blob, 'pts', opts); + if (format === 'xyz') return new AsciiPointsStreamingSource(blob, 'xyz', opts); throw new Error(`decode-worker: unknown format "${format}"`); } diff --git a/packages/pointcloud/src/streaming/e57-source.ts b/packages/pointcloud/src/streaming/e57-source.ts index 3fa1e98b7..9306d8953 100644 --- a/packages/pointcloud/src/streaming/e57-source.ts +++ b/packages/pointcloud/src/streaming/e57-source.ts @@ -36,7 +36,7 @@ export class E57StreamingSource implements StreamingPointSource { totalPointCount: this.chunk.pointCount, bbox: this.chunk.bbox, hasColor: !!this.chunk.colors, - hasClassification: false, + hasClassification: !!this.chunk.classifications, hasIntensity: !!this.chunk.intensities, label: this.label, }; @@ -63,6 +63,7 @@ function applyStride(chunk: DecodedPointChunk, stride: number): DecodedPointChun const positions = new Float32Array(newCount * 3); const colors = chunk.colors ? new Float32Array(newCount * 3) : undefined; const intensities = chunk.intensities ? new Uint16Array(newCount) : undefined; + const classifications = chunk.classifications ? 
new Uint8Array(newCount) : undefined; let dst = 0; for (let i = 0; i < chunk.pointCount; i += s) { positions[dst * 3] = chunk.positions[i * 3]; @@ -76,12 +77,16 @@ function applyStride(chunk: DecodedPointChunk, stride: number): DecodedPointChun if (intensities && chunk.intensities) { intensities[dst] = chunk.intensities[i]; } + if (classifications && chunk.classifications) { + classifications[dst] = chunk.classifications[i]; + } dst++; } return { positions, colors, intensities, + classifications, pointCount: newCount, bbox: chunk.bbox, }; diff --git a/packages/pointcloud/src/streaming/laz-source.ts b/packages/pointcloud/src/streaming/laz-source.ts index 79a9e3219..9c5b03ea7 100644 --- a/packages/pointcloud/src/streaming/laz-source.ts +++ b/packages/pointcloud/src/streaming/laz-source.ts @@ -47,6 +47,18 @@ let modulePromise: Promise | null = null; async function loadLazPerf(): Promise { if (!modulePromise) { modulePromise = (async () => { + // The shipped `laz-perf.js` shim resolves the wasm via emscripten's + // `locateFile` and tries `fetch("laz-perf.wasm")` relative to the + // worker's script directory — which under Vite ends up at + // `/assets/.wasm` (404) or `/laz-perf.wasm` (404 → SPA index + // HTML served as text/plain, which is what triggered the + // "MIME type 'text/plain'" failure on autzen-classified.laz). + // + // Pre-fetch the wasm via Vite's `?url` asset pipeline (hashed, + // served with `application/wasm`) and hand the bytes to emscripten + // as `Module.wasmBinary` so its own fetch is skipped entirely. + const wasmBinary = await fetchLazPerfWasm(); + // Dynamic import keeps `laz-perf` out of bundles that don't touch // LAZ. 
The package is shipped as CommonJS (`lib/{node,web}/index.js`),
      // and Vite/webpack wrap CJS imports under `.default` — but the way
@@ -56,7 +68,7 @@ async function loadLazPerf(): Promise {
      //   • { default: createLazPerf } — esModuleInterop on a fn
      //   • module-as-function (legacy UMD) — `lazPerf` IS the factory
      const ns = (await import('laz-perf')) as unknown as Record;
-      type Factory = () => Promise;
+      type Factory = (moduleOverrides?: Record) => Promise;
      const dflt = ns.default as Record | (() => unknown) | undefined;
      const candidates: Array = [
        ns.createLazPerf,
@@ -72,12 +84,35 @@ async function loadLazPerf(): Promise {
          `laz-perf: could not find createLazPerf factory (saw keys: ${keys || '<none>'})`,
        );
      }
-      return factory();
+      return factory({ wasmBinary });
    })();
  }
  return modulePromise;
}

+async function fetchLazPerfWasm(): Promise<Uint8Array> {
+  // `?url` triggers Vite's asset pipeline so the .wasm ends up in the
+  // build output with the right MIME type. The try/catch does not fall
+  // back to anything — it converts a failed resolution on non-Vite
+  // hosts (e.g. node-side tests) into one actionable error instead of
+  // an opaque module-resolution failure.
+  let wasmUrl: string | undefined;
+  try {
+    const mod = (await import('laz-perf/lib/web/laz-perf.wasm?url')) as { default: string };
+    wasmUrl = mod.default;
+  } catch (err) {
+    throw new Error(
+      `laz-perf: could not resolve wasm asset URL (${err instanceof Error ? err.message : String(err)}).
` + + 'Ensure the bundler treats `laz-perf/lib/web/laz-perf.wasm?url` as a static asset.', + ); + } + const response = await fetch(wasmUrl); + if (!response.ok) { + throw new Error(`laz-perf: wasm fetch failed (${response.status} ${response.statusText}) for ${wasmUrl}`); + } + const buffer = await response.arrayBuffer(); + return new Uint8Array(buffer); +} + export class LazStreamingSource implements StreamingPointSource { private blob: Blob; private downsample: DownsampleHint; diff --git a/packages/pointcloud/src/streaming/protocol.ts b/packages/pointcloud/src/streaming/protocol.ts index 081f94387..1705d0d64 100644 --- a/packages/pointcloud/src/streaming/protocol.ts +++ b/packages/pointcloud/src/streaming/protocol.ts @@ -12,7 +12,7 @@ import type { DecodedPointChunk, PointCloudBBox } from '../types.js'; import type { PointSourceInfo } from './types.js'; -export type WorkerSourceFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57'; +export type WorkerSourceFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57' | 'pts' | 'xyz'; /** main → worker */ export type WorkerRequest = diff --git a/packages/pointcloud/src/streaming/vendor.d.ts b/packages/pointcloud/src/streaming/vendor.d.ts new file mode 100644 index 000000000..a846c27c2 --- /dev/null +++ b/packages/pointcloud/src/streaming/vendor.d.ts @@ -0,0 +1,13 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Ambient declarations for vendor modules with non-standard import + * shapes (Vite `?url` asset queries) used by streaming sources. 
+ */ + +declare module 'laz-perf/lib/web/laz-perf.wasm?url' { + const url: string; + export default url; +} diff --git a/packages/pointcloud/src/streaming/worker-client.ts b/packages/pointcloud/src/streaming/worker-client.ts index f7815a4de..b7d5ec913 100644 --- a/packages/pointcloud/src/streaming/worker-client.ts +++ b/packages/pointcloud/src/streaming/worker-client.ts @@ -25,7 +25,7 @@ import type { StreamingPointSource, } from './types.js'; -export type DecodeWorkerFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57'; +export type DecodeWorkerFormat = 'las' | 'laz' | 'ply' | 'pcd' | 'e57' | 'pts' | 'xyz'; export interface DecodeWorkerOptions { /** Override the worker constructor — useful for tests or custom bundlers. */ diff --git a/packages/renderer/src/deviation/deviation-pipeline.ts b/packages/renderer/src/deviation/deviation-pipeline.ts new file mode 100644 index 000000000..68960a043 --- /dev/null +++ b/packages/renderer/src/deviation/deviation-pipeline.ts @@ -0,0 +1,194 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Compute pipeline for BIM ↔ scan deviation. Owns the GPU-resident + * triangle BVH, builds bind groups per chunk on demand, and dispatches + * the closest-point-on-triangle compute shader. + * + * The BVH lives in two storage buffers (nodes + triangles); both are + * uploaded once per mesh-set change and reused across every chunk in + * every point cloud asset. Per-chunk bind groups are created lazily + * because a chunk's vertex buffer is the `positions` storage source + * for the compute pass. + */ + +import { deviationShaderSource } from './deviation-shader.wgsl.js'; +import type { TriangleBVHResult } from './triangle-bvh.js'; +import { POINT_VERTEX_BYTES } from '../pointcloud/point-pipeline.js'; + +/** Bytes per BVH node — 8 floats / 8 u32s laid out per the shader. 
*/ +const BVH_NODE_BYTES = 32; +/** Bytes per triangle — 12 floats (3 verts + face normal). */ +const TRIANGLE_BYTES = 48; +/** + * Uniform block size for `DeviationParams`. WGSL std140 packs the + * struct compactly; we add 4 padding u32s to round to 32 bytes which + * matches a single uniform alignment slot on every WebGPU impl. + */ +const PARAMS_UNIFORM_BYTES = 32; + +export interface DeviationDispatchInput { + /** Storage-usage GPU buffer holding interleaved point vertices. + * Must be the same buffer used as the splat pipeline's vertex + * buffer for this chunk; the compute shader reads positions + * directly from it (no copy). */ + positionsBuffer: GPUBuffer; + /** Output buffer — one f32 per point. Must allow STORAGE. */ + deviationsBuffer: GPUBuffer; + /** Number of points to process. */ + pointCount: number; + /** Optional clip range in metres. 0 / negative → no clip. */ + maxRange: number; +} + +export class DeviationPipeline { + private device: GPUDevice; + private pipeline: GPUComputePipeline; + private bindGroupLayout: GPUBindGroupLayout; + private bvhNodesBuffer: GPUBuffer | null = null; + private trianglesBuffer: GPUBuffer | null = null; + private bvhTriangleCount = 0; + private bvhNodeCount = 0; + private bvhBounds: TriangleBVHResult['bounds'] | null = null; + + constructor(device: GPUDevice) { + this.device = device; + this.bindGroupLayout = device.createBindGroupLayout({ + entries: [ + { binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'read-only-storage' } }, + { binding: 1, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'read-only-storage' } }, + { binding: 2, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'read-only-storage' } }, + { binding: 3, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } }, + { binding: 4, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'uniform' } }, + ], + }); + const layout = device.createPipelineLayout({ bindGroupLayouts: [this.bindGroupLayout] }); + this.pipeline = 
device.createComputePipeline({ + layout, + compute: { + module: device.createShaderModule({ code: deviationShaderSource }), + entryPoint: 'cs_main', + }, + }); + } + + /** + * Upload the per-triangle BVH to the GPU. Replaces any previous + * upload; safe to call repeatedly when the mesh set changes (load, + * federation update, isolation toggle). + */ + uploadBvh(bvh: TriangleBVHResult): void { + this.disposeBvh(); + if (bvh.triangleCount === 0) { + this.bvhNodeCount = 0; + this.bvhTriangleCount = 0; + this.bvhBounds = bvh.bounds; + return; + } + // Nodes: pack into a STORAGE buffer. The Float32Array view also + // contains u32 fields (childA, childB) but they were already + // written via Uint32Array aliasing during the build, so a single + // write of the underlying ArrayBuffer carries them too. + const nodeBuf = this.device.createBuffer({ + size: bvh.nodeCount * BVH_NODE_BYTES, + usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST, + }); + this.device.queue.writeBuffer( + nodeBuf, 0, + // Pass the typed-array view directly — TS widens `.buffer` to + // ArrayBufferLike on a function-parameter Float32Array which + // doesn't satisfy writeBuffer's signature. The view form has + // size in elements (so we slice down to the populated head). 
+ bvh.nodes.subarray(0, bvh.nodeCount * BVH_NODE_BYTES / 4), + ); + const triBuf = this.device.createBuffer({ + size: bvh.triangleCount * TRIANGLE_BYTES, + usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST, + }); + this.device.queue.writeBuffer( + triBuf, 0, + bvh.triangles.subarray(0, bvh.triangleCount * TRIANGLE_BYTES / 4), + ); + this.bvhNodesBuffer = nodeBuf; + this.trianglesBuffer = triBuf; + this.bvhNodeCount = bvh.nodeCount; + this.bvhTriangleCount = bvh.triangleCount; + this.bvhBounds = bvh.bounds; + } + + hasBvh(): boolean { + return this.bvhNodesBuffer !== null && this.trianglesBuffer !== null && this.bvhTriangleCount > 0; + } + + getBvhStats(): { nodeCount: number; triangleCount: number; bounds: TriangleBVHResult['bounds'] | null } { + return { + nodeCount: this.bvhNodeCount, + triangleCount: this.bvhTriangleCount, + bounds: this.bvhBounds, + }; + } + + /** + * Run the compute pass for one point chunk. Encoder-based so the + * caller can dispatch many chunks back-to-back in one submit. + * + * Returns false when there's no BVH uploaded yet — caller should + * skip the chunk in that case. + */ + dispatch(encoder: GPUCommandEncoder, input: DeviationDispatchInput): boolean { + if (!this.bvhNodesBuffer || !this.trianglesBuffer) return false; + if (input.pointCount === 0) return true; + + // Per-chunk uniform buffer with the dispatch params. Created + // fresh each call — 32 bytes is too small to bother caching. + const paramsBuffer = this.device.createBuffer({ + size: PARAMS_UNIFORM_BYTES, + usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST, + }); + const params = new Uint32Array(PARAMS_UNIFORM_BYTES / 4); + const paramsF = new Float32Array(params.buffer); + params[0] = input.pointCount; + // Each point in the splat vertex layout occupies POINT_VERTEX_BYTES + // (24 bytes). The shader walks `positions` as a flat f32 array; one + // point = 6 floats; the vec3 position is at offset 0. 
+ params[1] = POINT_VERTEX_BYTES / 4; // pointStrideF32 + params[2] = 0; // positionOffsetF32 + paramsF[3] = Math.max(0, input.maxRange); + // params[4..7] reserved padding; left zero. + this.device.queue.writeBuffer(paramsBuffer, 0, params.buffer, 0, PARAMS_UNIFORM_BYTES); + + const bindGroup = this.device.createBindGroup({ + layout: this.bindGroupLayout, + entries: [ + { binding: 0, resource: { buffer: this.bvhNodesBuffer } }, + { binding: 1, resource: { buffer: this.trianglesBuffer } }, + { binding: 2, resource: { buffer: input.positionsBuffer } }, + { binding: 3, resource: { buffer: input.deviationsBuffer } }, + { binding: 4, resource: { buffer: paramsBuffer } }, + ], + }); + + const pass = encoder.beginComputePass(); + pass.setPipeline(this.pipeline); + pass.setBindGroup(0, bindGroup); + // Workgroup size 64; ceil division. Most GPUs handle ~10⁵ + // workgroups in one dispatch without trouble. + const groupCount = Math.ceil(input.pointCount / 64); + pass.dispatchWorkgroups(groupCount); + pass.end(); + return true; + } + + private disposeBvh(): void { + this.bvhNodesBuffer?.destroy(); + this.trianglesBuffer?.destroy(); + this.bvhNodesBuffer = null; + this.trianglesBuffer = null; + } + + destroy(): void { + this.disposeBvh(); + } +} diff --git a/packages/renderer/src/deviation/deviation-shader.wgsl.ts b/packages/renderer/src/deviation/deviation-shader.wgsl.ts new file mode 100644 index 000000000..d7325f919 --- /dev/null +++ b/packages/renderer/src/deviation/deviation-shader.wgsl.ts @@ -0,0 +1,238 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Compute shader for BIM ↔ scan deviation. + * + * For each scan point, walk a per-triangle BVH and compute the signed + * distance to the nearest mesh surface. Sign is positive on the side + * the triangle's outward normal points to. 
+ *
+ * Bind group layout:
+ *   @binding(0) bvhNodes: array<BvhNode> — 32-byte nodes (read)
+ *                  packed: [aabbMin, leafOrLeft]
+ *                          [aabbMax, leafOrRight]
+ *   @binding(1) triangles: array<f32> — 12 floats per triangle
+ *   @binding(2) positions: array<f32> — 6 floats per point
+ *                  (matches POINT_VERTEX_BYTES
+ *                  layout: vec3<f32> + colorPacked)
+ *   @binding(3) deviations: array<f32> — output, one float per point
+ *   @binding(4) params: DeviationParams
+ *
+ * Workgroup size 64 (one wavefront / warp on most desktop GPUs).
+ * Each invocation processes one point.
+ */
+
+export const deviationShaderSource = /* wgsl */ `
+struct BvhNode {
+  aabbMinX: f32, aabbMinY: f32, aabbMinZ: f32,
+  // High bit: leaf flag. Low 31 bits: leaf=triStart, internal=leftChildIdx.
+  leafOrLeft: u32,
+  aabbMaxX: f32, aabbMaxY: f32, aabbMaxZ: f32,
+  // Leaf=triCount, internal=rightChildIdx.
+  countOrRight: u32,
+}
+
+struct DeviationParams {
+  pointCount: u32,
+  pointStrideF32: u32,    // floats between successive points in positions buffer
+  positionOffsetF32: u32, // float offset of vec3 position within a point
+  // Optional clip range — when nonzero, values past ±maxRange are
+  // clamped (after the full BVH query; the sign is preserved) so the
+  // histogram + colour ramp stay focused on near-surface points.
+  maxRange: f32,
+  // Reserved padding to round the uniform block to 32 bytes, matching
+  // PARAMS_UNIFORM_BYTES on the host side.
+  _pad0: u32, _pad1: u32, _pad2: u32, _pad3: u32,
+}
+
+@group(0) @binding(0) var<storage, read> bvhNodes: array<BvhNode>;
+@group(0) @binding(1) var<storage, read> triangles: array<f32>;
+@group(0) @binding(2) var<storage, read> positions: array<f32>;
+@group(0) @binding(3) var<storage, read_write> deviations: array<f32>;
+@group(0) @binding(4) var<uniform> params: DeviationParams;
+
+const LEAF_FLAG: u32 = 0x80000000u;
+const STACK_SIZE: u32 = 64u;
+
+// Squared distance from point p to AABB [aabbMin, aabbMax].
+// Returns 0 if p is inside the box.
+fn distSqPointAabb( + px: f32, py: f32, pz: f32, + ax: f32, ay: f32, az: f32, + bx: f32, by: f32, bz: f32, +) -> f32 { + let dx = max(max(ax - px, 0.0), px - bx); + let dy = max(max(ay - py, 0.0), py - by); + let dz = max(max(az - pz, 0.0), pz - bz); + return dx * dx + dy * dy + dz * dz; +} + +struct ClosestResult { + point: vec3, + distSq: f32, +} + +// Ericson, Real-Time Collision Detection §5.1.5: closest point on +// a triangle to an arbitrary point in space. Branches over the +// Voronoi regions of the triangle (3 verts, 3 edges, interior). +fn closestPointOnTriangle(p: vec3, a: vec3, b: vec3, c: vec3) -> ClosestResult { + let ab = b - a; + let ac = c - a; + let ap = p - a; + let d1 = dot(ab, ap); + let d2 = dot(ac, ap); + if (d1 <= 0.0 && d2 <= 0.0) { + let diff = p - a; + return ClosestResult(a, dot(diff, diff)); + } + let bp = p - b; + let d3 = dot(ab, bp); + let d4 = dot(ac, bp); + if (d3 >= 0.0 && d4 <= d3) { + let diff = p - b; + return ClosestResult(b, dot(diff, diff)); + } + let vc = d1 * d4 - d3 * d2; + if (vc <= 0.0 && d1 >= 0.0 && d3 <= 0.0) { + let v = d1 / (d1 - d3); + let q = a + v * ab; + let diff = p - q; + return ClosestResult(q, dot(diff, diff)); + } + let cp = p - c; + let d5 = dot(ab, cp); + let d6 = dot(ac, cp); + if (d6 >= 0.0 && d5 <= d6) { + let diff = p - c; + return ClosestResult(c, dot(diff, diff)); + } + let vb = d5 * d2 - d1 * d6; + if (vb <= 0.0 && d2 >= 0.0 && d6 <= 0.0) { + let w = d2 / (d2 - d6); + let q = a + w * ac; + let diff = p - q; + return ClosestResult(q, dot(diff, diff)); + } + let va = d3 * d6 - d5 * d4; + if (va <= 0.0 && (d4 - d3) >= 0.0 && (d5 - d6) >= 0.0) { + let w = (d4 - d3) / ((d4 - d3) + (d5 - d6)); + let q = b + w * (c - b); + let diff = p - q; + return ClosestResult(q, dot(diff, diff)); + } + // Inside the face: barycentric (v, w). 
+ let denom = 1.0 / (va + vb + vc); + let v = vb * denom; + let w = vc * denom; + let q = a + ab * v + ac * w; + let diff = p - q; + return ClosestResult(q, dot(diff, diff)); +} + +@compute @workgroup_size(64) +fn cs_main(@builtin(global_invocation_id) gid: vec3) { + let pi = gid.x; + if (pi >= params.pointCount) { + return; + } + let posOff = pi * params.pointStrideF32 + params.positionOffsetF32; + let p = vec3(positions[posOff], positions[posOff + 1u], positions[posOff + 2u]); + + // Best squared distance across all triangles. Stored squared so + // we can prune AABBs without taking sqrt every step. + var bestDistSq: f32 = 1.0e30; + var bestPoint: vec3 = vec3(0.0); + var bestNormal: vec3 = vec3(0.0, 1.0, 0.0); + + // Stack-based BVH descent. Workgroup-uniform stack would let + // siblings cooperate; for v1 a per-thread stack in private memory + // is simpler and fast enough. + var stack: array; + var sp: u32 = 0u; + stack[sp] = 0u; + sp = sp + 1u; + + loop { + if (sp == 0u) { break; } + sp = sp - 1u; + let nodeIdx = stack[sp]; + let node = bvhNodes[nodeIdx]; + + let aabbDistSq = distSqPointAabb( + p.x, p.y, p.z, + node.aabbMinX, node.aabbMinY, node.aabbMinZ, + node.aabbMaxX, node.aabbMaxY, node.aabbMaxZ, + ); + if (aabbDistSq >= bestDistSq) { + continue; + } + + let leafFlag = node.leafOrLeft & LEAF_FLAG; + if (leafFlag != 0u) { + let triStart = node.leafOrLeft & (~LEAF_FLAG); + let triCount = node.countOrRight; + var i: u32 = 0u; + loop { + if (i >= triCount) { break; } + let triOff = (triStart + i) * 12u; + let v0 = vec3(triangles[triOff], triangles[triOff + 1u], triangles[triOff + 2u]); + let v1 = vec3(triangles[triOff + 3u], triangles[triOff + 4u], triangles[triOff + 5u]); + let v2 = vec3(triangles[triOff + 6u], triangles[triOff + 7u], triangles[triOff + 8u]); + let n = vec3(triangles[triOff + 9u], triangles[triOff + 10u], triangles[triOff + 11u]); + let res = closestPointOnTriangle(p, v0, v1, v2); + if (res.distSq < bestDistSq) { + bestDistSq = res.distSq; + 
bestPoint = res.point; + bestNormal = n; + } + i = i + 1u; + } + } else { + // Internal node: push both children. Sibling that's closer to + // p first (top of stack) so we hit the tighter bound earlier. + let leftIdx = node.leafOrLeft; + let rightIdx = node.countOrRight; + let lNode = bvhNodes[leftIdx]; + let rNode = bvhNodes[rightIdx]; + let lDist = distSqPointAabb( + p.x, p.y, p.z, + lNode.aabbMinX, lNode.aabbMinY, lNode.aabbMinZ, + lNode.aabbMaxX, lNode.aabbMaxY, lNode.aabbMaxZ, + ); + let rDist = distSqPointAabb( + p.x, p.y, p.z, + rNode.aabbMinX, rNode.aabbMinY, rNode.aabbMinZ, + rNode.aabbMaxX, rNode.aabbMaxY, rNode.aabbMaxZ, + ); + // Push the farther child first → closer popped first. + if (sp + 2u <= STACK_SIZE) { + if (lDist < rDist) { + stack[sp] = rightIdx; sp = sp + 1u; + stack[sp] = leftIdx; sp = sp + 1u; + } else { + stack[sp] = leftIdx; sp = sp + 1u; + stack[sp] = rightIdx; sp = sp + 1u; + } + } + } + } + + // Signed distance: project (p - closestPoint) onto the closest + // triangle's normal. Positive ⇒ p is on the outward side. + let toPoint = p - bestPoint; + let dist = sqrt(bestDistSq); + let s = sign(dot(toPoint, bestNormal)); + var signed: f32 = s * dist; + + // Optional clip: keeps the histogram + ramp focused on near-surface + // points. Past ±maxRange the value pegs at the edge. + if (params.maxRange > 0.0) { + let mr = params.maxRange; + if (signed > mr) { signed = mr; } + if (signed < -mr) { signed = -mr; } + } + + deviations[pi] = signed; +} +`; diff --git a/packages/renderer/src/deviation/triangle-bvh.ts b/packages/renderer/src/deviation/triangle-bvh.ts new file mode 100644 index 000000000..41339a837 --- /dev/null +++ b/packages/renderer/src/deviation/triangle-bvh.ts @@ -0,0 +1,284 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +/** + * Per-triangle BVH for closest-point queries (BIM ↔ scan deviation). + * + * Distinct from `bvh.ts` which is a per-mesh BVH used for raycasting: + * for closest-point we want fine-grained pruning, so each leaf holds + * a contiguous range of triangles. The build also flattens the tree + * to a `Float32Array` ready for direct GPU upload — no second pass. + * + * Node layout (32 bytes = 8 floats per node, packed Float32 + bitcast u32): + * [0..2] aabbMin (vec3) + * [3] childA / triStart (u32 bitcast: leaf flag = high bit) + * [4..6] aabbMax (vec3) + * [7] childB / triCount (u32 bitcast) + * + * Triangle layout (48 bytes = 12 floats per triangle): + * [0..2] v0 (vec3) + * [3..5] v1 (vec3) + * [6..8] v2 (vec3) + * [9..11] normalised face normal (vec3) — sign convention: outward + * from mesh interior assuming CCW winding (right-hand rule) + * + * Maximum supported triangles: ~2³¹ (one bit reserved for the leaf + * flag). Real BIMs top out around 10⁷ triangles before other bottle- + * necks kick in. + */ + +import type { MeshData } from '@ifc-lite/geometry'; + +export interface TriangleBVHResult { + /** Flat node buffer (Float32Array). Each node is 8 floats. */ + nodes: Float32Array; + /** Flat triangle buffer (Float32Array). Each triangle is 12 floats. */ + triangles: Float32Array; + /** Total number of triangles. */ + triangleCount: number; + /** Total number of nodes (root at index 0). */ + nodeCount: number; + /** Number of source meshes folded into this BVH. */ + meshCount: number; + /** Aggregate bounds of all triangles. */ + bounds: { min: [number, number, number]; max: [number, number, number] }; +} + +/** High bit of the u32-packed slot 3 marks a leaf node. */ +const LEAF_FLAG = 0x80000000; + +/** + * Build the per-triangle BVH. + * + * Splits leaves until each holds at most `maxTrisPerLeaf` triangles + * (default 16, balancing tree depth vs. per-leaf work). Median split + * along the longest AABB axis — fast O(n log n) build, no SAH for v1. 
+ * + * For typical BIMs (1M triangles) the build runs in ~1–3 seconds on + * the main thread. Acceptable since it only re-runs when the mesh + * set changes (load / federation update). + */ +export function buildTriangleBVH( + meshes: ReadonlyArray, + options: { maxTrisPerLeaf?: number } = {}, +): TriangleBVHResult { + const maxTrisPerLeaf = options.maxTrisPerLeaf ?? 16; + + // Pass 1: flatten meshes into a triangle list with precomputed + // centroids + AABBs. This array stays in CPU memory only long + // enough to drive the build; the final output is a packed Float32 + // buffer suitable for GPU upload. + const triCount = countTriangles(meshes); + const triBuf = new Float32Array(triCount * 12); + const centroids = new Float32Array(triCount * 3); + const triAabb = new Float32Array(triCount * 6); // min(x,y,z) + max(x,y,z) + let triIndex = 0; + let bxMin = Infinity, byMin = Infinity, bzMin = Infinity; + let bxMax = -Infinity, byMax = -Infinity, bzMax = -Infinity; + + for (const mesh of meshes) { + const positions = mesh.positions; + const indices = mesh.indices; + if (!positions || positions.length === 0) continue; + const n = indices ? indices.length : positions.length / 3; + for (let i = 0; i + 2 < n; i += 3) { + const i0 = indices ? indices[i] * 3 : (i) * 3; + const i1 = indices ? indices[i + 1] * 3 : (i + 1) * 3; + const i2 = indices ? indices[i + 2] * 3 : (i + 2) * 3; + const x0 = positions[i0], y0 = positions[i0 + 1], z0 = positions[i0 + 2]; + const x1 = positions[i1], y1 = positions[i1 + 1], z1 = positions[i1 + 2]; + const x2 = positions[i2], y2 = positions[i2 + 1], z2 = positions[i2 + 2]; + const off = triIndex * 12; + triBuf[off] = x0; triBuf[off + 1] = y0; triBuf[off + 2] = z0; + triBuf[off + 3] = x1; triBuf[off + 4] = y1; triBuf[off + 5] = z1; + triBuf[off + 6] = x2; triBuf[off + 7] = y2; triBuf[off + 8] = z2; + // Face normal — cross((v1-v0), (v2-v0)), normalised. 
+ const ex = x1 - x0, ey = y1 - y0, ez = z1 - z0; + const fx = x2 - x0, fy = y2 - y0, fz = z2 - z0; + let nx = ey * fz - ez * fy; + let ny = ez * fx - ex * fz; + let nz = ex * fy - ey * fx; + const len = Math.hypot(nx, ny, nz); + if (len > 1e-12) { + nx /= len; ny /= len; nz /= len; + } else { + // Degenerate triangle (zero area) — keep a default so we + // don't poison the GPU with NaN. Sign on these is meaningless. + nx = 0; ny = 1; nz = 0; + } + triBuf[off + 9] = nx; + triBuf[off + 10] = ny; + triBuf[off + 11] = nz; + + // Centroid + AABB for the BVH builder. + const cx = (x0 + x1 + x2) / 3; + const cy = (y0 + y1 + y2) / 3; + const cz = (z0 + z1 + z2) / 3; + centroids[triIndex * 3] = cx; + centroids[triIndex * 3 + 1] = cy; + centroids[triIndex * 3 + 2] = cz; + const tMinX = Math.min(x0, x1, x2); + const tMinY = Math.min(y0, y1, y2); + const tMinZ = Math.min(z0, z1, z2); + const tMaxX = Math.max(x0, x1, x2); + const tMaxY = Math.max(y0, y1, y2); + const tMaxZ = Math.max(z0, z1, z2); + triAabb[triIndex * 6] = tMinX; + triAabb[triIndex * 6 + 1] = tMinY; + triAabb[triIndex * 6 + 2] = tMinZ; + triAabb[triIndex * 6 + 3] = tMaxX; + triAabb[triIndex * 6 + 4] = tMaxY; + triAabb[triIndex * 6 + 5] = tMaxZ; + + if (tMinX < bxMin) bxMin = tMinX; + if (tMinY < byMin) byMin = tMinY; + if (tMinZ < bzMin) bzMin = tMinZ; + if (tMaxX > bxMax) bxMax = tMaxX; + if (tMaxY > byMax) byMax = tMaxY; + if (tMaxZ > bzMax) bzMax = tMaxZ; + + triIndex++; + } + } + + // Build a permutation: tris[outOrder[k]] is the k-th triangle in + // the BVH layout. The final emit re-orders `triBuf` in place via + // a temporary copy so leaves can store [start, count) ranges + // instead of an index list. + const outOrder = new Uint32Array(triIndex); + for (let i = 0; i < triIndex; i++) outOrder[i] = i; + + // Allocate node buffer with worst-case size: 2N - 1 nodes for N + // leaves (binary tree). Trim at the end. 
+ const maxNodes = Math.max(1, 2 * triIndex - 1); + const nodes = new Float32Array(maxNodes * 8); + const nodesU32 = new Uint32Array(nodes.buffer); + let nodeCursor = 0; + + // Iterative build to avoid JS recursion limits on big BIMs. + // Stack entries hold the [start, end) range in `outOrder` and the + // node index to populate. + interface Frame { start: number; end: number; nodeIdx: number; } + const allocNode = (): number => { + const idx = nodeCursor++; + return idx; + }; + const setNodeAabb = (nodeIdx: number, mn: [number, number, number], mx: [number, number, number]): void => { + const off = nodeIdx * 8; + nodes[off] = mn[0]; nodes[off + 1] = mn[1]; nodes[off + 2] = mn[2]; + nodes[off + 4] = mx[0]; nodes[off + 5] = mx[1]; nodes[off + 6] = mx[2]; + }; + const setLeaf = (nodeIdx: number, triStart: number, triCount: number): void => { + const off = nodeIdx * 8; + // High bit set on slot 3 marks "leaf"; remaining 31 bits hold start index. + nodesU32[off + 3] = LEAF_FLAG | (triStart >>> 0); + nodesU32[off + 7] = triCount >>> 0; + }; + const setInternal = (nodeIdx: number, leftIdx: number, rightIdx: number): void => { + const off = nodeIdx * 8; + nodesU32[off + 3] = leftIdx >>> 0; + nodesU32[off + 7] = rightIdx >>> 0; + }; + + const stack: Frame[] = []; + if (triIndex > 0) { + stack.push({ start: 0, end: triIndex, nodeIdx: allocNode() }); + } else { + // Empty BVH — single zero-bound leaf so traversal can early-out. + allocNode(); + setNodeAabb(0, [0, 0, 0], [0, 0, 0]); + setLeaf(0, 0, 0); + } + + while (stack.length > 0) { + const frame = stack.pop()!; + const { start, end, nodeIdx } = frame; + const count = end - start; + + // Compute AABB across the range. 
+ let nMinX = Infinity, nMinY = Infinity, nMinZ = Infinity; + let nMaxX = -Infinity, nMaxY = -Infinity, nMaxZ = -Infinity; + let cMinX = Infinity, cMinY = Infinity, cMinZ = Infinity; + let cMaxX = -Infinity, cMaxY = -Infinity, cMaxZ = -Infinity; + for (let k = start; k < end; k++) { + const t = outOrder[k]; + const aOff = t * 6; + const cOff = t * 3; + if (triAabb[aOff] < nMinX) nMinX = triAabb[aOff]; + if (triAabb[aOff + 1] < nMinY) nMinY = triAabb[aOff + 1]; + if (triAabb[aOff + 2] < nMinZ) nMinZ = triAabb[aOff + 2]; + if (triAabb[aOff + 3] > nMaxX) nMaxX = triAabb[aOff + 3]; + if (triAabb[aOff + 4] > nMaxY) nMaxY = triAabb[aOff + 4]; + if (triAabb[aOff + 5] > nMaxZ) nMaxZ = triAabb[aOff + 5]; + if (centroids[cOff] < cMinX) cMinX = centroids[cOff]; + if (centroids[cOff + 1] < cMinY) cMinY = centroids[cOff + 1]; + if (centroids[cOff + 2] < cMinZ) cMinZ = centroids[cOff + 2]; + if (centroids[cOff] > cMaxX) cMaxX = centroids[cOff]; + if (centroids[cOff + 1] > cMaxY) cMaxY = centroids[cOff + 1]; + if (centroids[cOff + 2] > cMaxZ) cMaxZ = centroids[cOff + 2]; + } + setNodeAabb(nodeIdx, [nMinX, nMinY, nMinZ], [nMaxX, nMaxY, nMaxZ]); + + // Stop splitting when we hit the leaf threshold. + if (count <= maxTrisPerLeaf) { + setLeaf(nodeIdx, start, count); + continue; + } + + // Pick the longest centroid-bbox axis and median-split. + const dx = cMaxX - cMinX, dy = cMaxY - cMinY, dz = cMaxZ - cMinZ; + const axis = (dx > dy && dx > dz) ? 0 : (dy > dz ? 1 : 2); + // Quickselect-ish median split: in-place sort of `outOrder[start:end]` + // by centroid on the chosen axis. JS Array.sort is good enough here. 
+ const slice = Array.from(outOrder.subarray(start, end)); + slice.sort((a, b) => centroids[a * 3 + axis] - centroids[b * 3 + axis]); + for (let i = 0; i < slice.length; i++) outOrder[start + i] = slice[i]; + const mid = start + (count >> 1); + if (mid === start || mid === end) { + // Centroid degeneracy → can't split, fall back to leaf even + // though it exceeds the threshold. Rare in practice. + setLeaf(nodeIdx, start, count); + continue; + } + + const leftIdx = allocNode(); + const rightIdx = allocNode(); + setInternal(nodeIdx, leftIdx, rightIdx); + // Right first, left second → left popped first → DFS left-leaning. + stack.push({ start: mid, end, nodeIdx: rightIdx }); + stack.push({ start, end: mid, nodeIdx: leftIdx }); + } + + // Emit triangles in BVH order so leaves can store [start, count). + const reordered = new Float32Array(triIndex * 12); + for (let k = 0; k < triIndex; k++) { + const src = outOrder[k] * 12; + const dst = k * 12; + for (let i = 0; i < 12; i++) reordered[dst + i] = triBuf[src + i]; + } + + return { + nodes: nodes.subarray(0, nodeCursor * 8), + triangles: reordered, + triangleCount: triIndex, + nodeCount: nodeCursor, + meshCount: meshes.length, + bounds: { + min: triIndex > 0 ? [bxMin, byMin, bzMin] : [0, 0, 0], + max: triIndex > 0 ? [bxMax, byMax, bzMax] : [0, 0, 0], + }, + }; +} + +function countTriangles(meshes: ReadonlyArray): number { + let count = 0; + for (const mesh of meshes) { + const positions = mesh.positions; + if (!positions || positions.length === 0) continue; + const indices = mesh.indices; + const n = indices ? 
indices.length : positions.length / 3; + count += Math.floor(n / 3); + } + return count; +} diff --git a/packages/renderer/src/index.ts b/packages/renderer/src/index.ts index 1f229905b..7abcd8566 100644 --- a/packages/renderer/src/index.ts +++ b/packages/renderer/src/index.ts @@ -97,6 +97,8 @@ import { PostProcessor } from './post-processor.js'; import { EdlPass } from './edl-pass.js'; import { PointCloudRenderer } from './pointcloud/point-cloud-renderer.js'; import type { PointCloudAsset } from '@ifc-lite/geometry'; +import { DeviationPipeline } from './deviation/deviation-pipeline.js'; +import { buildTriangleBVH } from './deviation/triangle-bvh.js'; const MAX_ENCODED_ENTITY_ID = 0xFFFFFF; let warnedEntityIdRange = false; @@ -120,6 +122,26 @@ type ResolvedVisualEnhancement = { }; }; +/** + * Build a deterministic fingerprint of the BVH input mesh set so + * `Renderer.computeDeviations` can skip the rebuild when the source + * geometry hasn't changed. Folds in expressId / modelIndex / position + * + index lengths per mesh so two distinct mesh sets that happen to + * share the same aggregate position-length total can't collide on the + * same fingerprint and reuse a stale BVH. + */ +function computeBvhFingerprint(meshes: ReadonlyArray): string { + const parts: string[] = [String(meshes.length)]; + for (const m of meshes) { + const id = m.expressId ?? -1; + const mi = m.modelIndex ?? -1; + const posLen = m.positions?.length ?? 0; + const idxLen = m.indices?.length ?? 0; + parts.push(`${id}:${mi}:${posLen}:${idxLen}`); + } + return parts.join('|'); +} + /** * Main renderer class */ @@ -142,6 +164,15 @@ export class Renderer { highQuality: true, }; private pointCloudRenderer: PointCloudRenderer | null = null; + private deviationPipeline: DeviationPipeline | null = null; + /** + * Cache of which mesh-set the BVH was built from. 
We rebuild on + * `computeDeviations` only when the cached "fingerprint" misses, + * so re-running deviation against the same model is a fast + * dispatch — the BVH is multi-second on big BIMs and we don't + * want to pay that on every slider drag. + */ + private deviationBvhFingerprint: string | null = null; private visualEnhancementState: ResolvedVisualEnhancement = { enabled: true, edgeContrast: { enabled: true, intensity: 1.0 }, @@ -226,6 +257,10 @@ export class Renderer { 'depth24plus-stencil8', this.pipeline.getSampleCount(), ); + // Compute pipeline for the BIM↔scan deviation heatmap. Lazily + // owns the per-triangle BVH GPU buffers; idle until the first + // `computeDeviations` call. + this.deviationPipeline = new DeviationPipeline(this.device.getDevice()); this.edlPass = new EdlPass(this.device, this.pipeline.getSampleCount()); this.camera.setAspect(width / height); @@ -411,6 +446,126 @@ export class Renderer { this.requestRender(); } + /** + * Compute BIM ↔ scan deviation for every loaded point cloud asset. + * + * Walks every triangle in the scene (individual + batched meshes, + * regardless of which IFC ingest path produced them — STEP, IFCx, + * GLB, or federated combinations), builds a per-triangle BVH on + * the GPU, then runs a closest-point compute pass per chunk that + * writes signed distance into each chunk's deviation buffer. + * + * Returns metadata so the UI can populate a histogram + auto-range: + * the per-asset point count, the suggested ±range from the 95th + * percentile, and the bbox the BVH was built from. + * + * Idempotent: re-running with the same mesh set reuses the GPU + * BVH (the BVH build dominates wall time on big BIMs). Pass + * `forceRebuild: true` to invalidate. + */ + async computeDeviations(opts: { + /** Clip range applied during compute. 0 → no clip. Default 1m. 
*/ + maxRange?: number; + forceRebuild?: boolean; + } = {}): Promise<{ + bvhTriangles: number; + bvhNodes: number; + chunksProcessed: number; + pointsProcessed: number; + bounds: { min: [number, number, number]; max: [number, number, number] } | null; + suggestedHalfRange: number; + }> { + if (!this.deviationPipeline || !this.pointCloudRenderer) { + throw new Error('Renderer not initialised — call init() first.'); + } + const meshes = this.collectAllSceneMeshes(); + // Fingerprint folds in per-mesh expressId / modelIndex / + // positions length / triangle count, so two distinct meshes + // that happen to share an aggregate position-length total + // can't alias each other. A federation reload that swaps one + // model for another with the same total triangle count would + // otherwise reuse the previous BVH and report wrong distances. + const fingerprint = computeBvhFingerprint(meshes); + if (opts.forceRebuild || fingerprint !== this.deviationBvhFingerprint) { + const bvh = buildTriangleBVH(meshes); + this.deviationPipeline.uploadBvh(bvh); + this.deviationBvhFingerprint = fingerprint; + } + const stats = this.deviationPipeline.getBvhStats(); + const maxRange = opts.maxRange ?? 1.0; + + // Encode every chunk into a single command submit so the GPU + // can pipeline the dispatches without a CPU round-trip per + // chunk. Histogram readback is a follow-up — for v1 we emit + // the deviation buffers and let the splat shader visualise. 
+ const encoder = this.device.getDevice().createCommandEncoder({ label: 'pointcloud-deviation' }); + let chunksProcessed = 0; + let pointsProcessed = 0; + const nodes = this.pointCloudRenderer.getInternalNodes(); + for (const node of nodes) { + for (const chunk of node.chunks) { + const ok = this.deviationPipeline.dispatch(encoder, { + positionsBuffer: chunk.vertexBuffer, + deviationsBuffer: chunk.deviationBuffer, + pointCount: chunk.pointCount, + maxRange, + }); + if (ok) { + chunksProcessed++; + pointsProcessed += chunk.pointCount; + } + } + } + this.device.getDevice().queue.submit([encoder.finish()]); + // Wait until the GPU finishes the dispatches before resolving. + // Otherwise the caller's "compute done" callback fires before + // the deviation buffers are actually populated. + await this.device.getDevice().queue.onSubmittedWorkDone(); + this.requestRender(); + + // Suggest a default half-range = max(0.01m, max-extent / 1000). + // Tighter than the maxRange clip; gives the user a reasonable + // starting slider position without a histogram readback. + const bb = stats.bounds; + const suggestedHalfRange = bb + ? Math.max(0.01, Math.max( + bb.max[0] - bb.min[0], + bb.max[1] - bb.min[1], + bb.max[2] - bb.min[2], + ) / 1000) + : 0.05; + + return { + bvhTriangles: stats.triangleCount, + bvhNodes: stats.nodeCount, + chunksProcessed, + pointsProcessed, + bounds: stats.bounds, + suggestedHalfRange, + }; + } + + /** + * Aggregate every triangle source the scene exposes — individual + * meshes (created on demand by picking / highlights) AND batched + * meshes (the streaming geometry path's compact GPU buffers). + * Both formats arrive as `MeshData`; the BVH builder doesn't care + * which source they came from. + */ + private collectAllSceneMeshes(): import('@ifc-lite/geometry').MeshData[] { + // The Scene keeps every CPU-side MeshData regardless of which + // ingest path produced it (STEP / IFCx / GLB). 
One iteration + // covers individual + batched + multi-piece + multi-model. + // `forEachMeshData` deduplicates by identity so a colour-merged + // batch is only added once even if it's indexed under multiple + // contributor expressIds. + const out: import('@ifc-lite/geometry').MeshData[] = []; + this.scene.forEachMeshData((md) => { + if (md.positions && md.positions.length > 0) out.push(md); + }); + return out; + } + /** * Toggle Eye-Dome Lighting and tune its strength. * @@ -1934,6 +2089,25 @@ export class Renderer { return this.pickingManager.pick(x, y, options); } + /** + * GPU-based rectangle pick. Drag-select returns the set of + * `expressId`s touched by any pixel inside `[x0,y0]..[x1,y1]` + * (CSS pixels, canvas-relative). Both meshes and point clouds + * participate. + * + * See `PickingManager.pickRect` for the visibility-filter + + * limitation notes. + */ + async pickRect( + x0: number, + y0: number, + x1: number, + y1: number, + options?: PickOptions, + ): Promise> { + return this.pickingManager.pickRect(x0, y0, x1, y1, options); + } + /** * Raycast into the scene to get precise 3D intersection point * This is more accurate than pick() as it returns the exact surface point @@ -2183,6 +2357,13 @@ export class Renderer { this.pointCloudRenderer?.clear(); this.pointCloudRenderer = null; + // BIM ↔ scan deviation pipeline + cached BVH GPU buffers. + // Done before queue.destroy() so the GPU calls inside + // `destroy()` still have a valid device. 
+ this.deviationPipeline?.destroy(); + this.deviationPipeline = null; + this.deviationBvhFingerprint = null; + // Snap detector geometry cache this.raycastEngine.clearCaches(); } diff --git a/packages/renderer/src/picker.ts b/packages/renderer/src/picker.ts index fd7afea68..9616bff76 100644 --- a/packages/renderer/src/picker.ts +++ b/packages/renderer/src/picker.ts @@ -9,6 +9,35 @@ import { WebGPUDevice } from './device.js'; import type { Mesh, PickResult } from './types.js'; import { PointPicker, decodePickSample, type PointPickNode } from './point-picker.js'; +import { MathUtils } from './math.js'; + +/** + * Reproject a pick coordinate (px, depth in [0, 1]) into world space + * using the inverse view-projection matrix. + * + * Reverse-Z: depth=1 is the near plane, depth=0 is far. A depth of + * exactly 0 means the click missed every drawn primitive (depth was + * never written, so the clear value sticks), and we return null. + * + * Pixel coords use the WebGPU/screen convention (origin top-left, y + * increases downward); NDC y is inverted to match the camera's + * projection matrix. + */ +function unprojectPickSample( + viewProj: Float32Array, + pickX: number, + pickY: number, + width: number, + height: number, + depth: number, +): { x: number; y: number; z: number } | null { + if (!Number.isFinite(depth) || depth <= 0) return null; + const ndcX = ((pickX + 0.5) / width) * 2 - 1; + const ndcY = 1 - ((pickY + 0.5) / height) * 2; + const inv = MathUtils.invert({ m: viewProj }); + if (!inv) return null; + return MathUtils.transformPoint(inv, { x: ndcX, y: ndcY, z: depth }); +} /** Point-pick sizing parameters forwarded to the GPU pipeline. 
*/ export interface PointPickSizing { @@ -46,7 +75,10 @@ export class Picker { this.depthTexture = this.device.createTexture({ size: { width, height }, format: 'depth32float', - usage: GPUTextureUsage.RENDER_ATTACHMENT, + // COPY_SRC so we can read the depth texel at the click position + // back to the CPU and unproject to recover the world-space hit + // point for hover tooltips / measurements. + usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC, }); // Create uniform buffer for viewProj matrix only (16 floats = 64 bytes) @@ -168,40 +200,212 @@ export class Picker { pointNodes?: ReadonlyArray, pointSizing?: PointPickSizing, ): Promise { - // Resize textures if needed + const encoder = this.renderPickPass(width, height, meshes, viewProj, pointNodes, pointSizing); + + // Clamp the texel origin to the texture bounds. Math.floor(x/y) can + // be -1 or equal to width/height on border clicks (and on + // pointer-captured drags that leave the canvas), and either makes + // copyTextureToBuffer reject the submit. pickRect already guards + // this path; pick() needs the same. + const sampleX = Math.max(0, Math.min(width - 1, Math.floor(x))); + const sampleY = Math.max(0, Math.min(height - 1, Math.floor(y))); + + // Read pixel at click position. WebGPU requires bytesPerRow to be a + // multiple of 256 for copyTextureToBuffer, even for a 1×1 read. + const BYTES_PER_ROW = 256; + const readBuffer = this.device.createBuffer({ + size: BYTES_PER_ROW, + usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, + }); + encoder.copyTextureToBuffer( + { + texture: this.colorTexture, + origin: { x: sampleX, y: sampleY, z: 0 }, + }, + { buffer: readBuffer, bytesPerRow: BYTES_PER_ROW, rowsPerImage: 1 }, + { width: 1, height: 1 }, + ); + + // Parallel depth-texel readback so the host can unproject the click + // to a world-space hit point. depth32float = 4 bytes per texel; we + // pad the row to 256 like the color readback. 
+ const depthBuffer = this.device.createBuffer({ + size: BYTES_PER_ROW, + usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, + }); + encoder.copyTextureToBuffer( + { + texture: this.depthTexture, + origin: { x: sampleX, y: sampleY, z: 0 }, + aspect: 'depth-only', + }, + { buffer: depthBuffer, bytesPerRow: BYTES_PER_ROW, rowsPerImage: 1 }, + { width: 1, height: 1 }, + ); + + this.device.queue.submit([encoder.finish()]); + // GPUMapMode.READ = 1 (WebGPU spec) + await Promise.all([readBuffer.mapAsync(1), depthBuffer.mapAsync(1)]); + const sample = new Uint32Array(readBuffer.getMappedRange())[0]; + const depth = new Float32Array(depthBuffer.getMappedRange())[0]; + readBuffer.unmap(); + depthBuffer.unmap(); + readBuffer.destroy(); + depthBuffer.destroy(); + + const decoded = decodePickSample(sample); + if (decoded.kind === 'none') return null; + + // Unproject (x, y, depth) → world space. Reverse-Z keeps depth in + // [0, 1] (1 = near, 0 = far) — same NDC convention as the camera + // raycaster, so MathUtils.transformPoint with the inverse viewProj + // gives the world hit position directly. + const worldXYZ = unprojectPickSample(viewProj, sampleX, sampleY, width, height, depth); + + if (decoded.kind === 'point') { + // Look up the asset for modelIndex. expressId is already the + // federated globalId (vertex shader writes it from the per-point + // attribute, no lookup table needed). + const node = pointNodes?.find((n) => (n.expressId >>> 0) === decoded.pointExpressId); + return { + expressId: decoded.pointExpressId, + modelIndex: node?.modelIndex, + worldXYZ: worldXYZ ?? undefined, + }; + } + + // Mesh hit — meshIndex is (actual index + 1), already validated > 0. + const mesh = meshes[decoded.meshIndexPlusOne - 1]; + if (!mesh) return null; + return { + expressId: mesh.expressId, + modelIndex: mesh.modelIndex, + worldXYZ: worldXYZ ?? 
undefined, + }; + } + + updateUniforms(viewProj: Float32Array): void { + // Update viewProj matrix only + this.device.queue.writeBuffer(this.uniformBuffer, 0, viewProj); + } + + /** + * Rectangle pick: render the pick pass once, then read back every + * texel inside `[x0, y0]..[x1, y1]` and dedupe the hit set. Returns + * a `Set` for both meshes and point clouds. + * + * Used by the Shift+drag rectangle-selection UI; not meant for + * sustained use because the readback grows with rect area. A 800×600 + * rect = 480k pixels = ~2 MB transfer, fine for one-shot but we'd + * want a GPU-side dedupe for sustained marquee selection. + */ + async pickRect( + x0: number, + y0: number, + x1: number, + y1: number, + width: number, + height: number, + meshes: Mesh[], + viewProj: Float32Array, + pointNodes?: ReadonlyArray, + pointSizing?: PointPickSizing, + ): Promise> { + // Normalise + clip rect to texture bounds. + const lx = Math.max(0, Math.floor(Math.min(x0, x1))); + const ly = Math.max(0, Math.floor(Math.min(y0, y1))); + const hx = Math.min(width - 1, Math.floor(Math.max(x0, x1))); + const hy = Math.min(height - 1, Math.floor(Math.max(y0, y1))); + const rectW = hx - lx + 1; + const rectH = hy - ly + 1; + if (rectW <= 0 || rectH <= 0) return new Set(); + + const encoder = this.renderPickPass(width, height, meshes, viewProj, pointNodes, pointSizing); + + // copyTextureToBuffer requires bytesPerRow to be a multiple of 256. + // r32uint = 4 bytes per texel. Round up to nearest 256. 
+ const rawRowBytes = rectW * 4; + const rowStride = Math.ceil(rawRowBytes / 256) * 256; + const readBuffer = this.device.createBuffer({ + size: rowStride * rectH, + usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, + }); + encoder.copyTextureToBuffer( + { + texture: this.colorTexture, + origin: { x: lx, y: ly, z: 0 }, + }, + { buffer: readBuffer, bytesPerRow: rowStride, rowsPerImage: rectH }, + { width: rectW, height: rectH }, + ); + this.device.queue.submit([encoder.finish()]); + await readBuffer.mapAsync(1); + const view = new Uint32Array(readBuffer.getMappedRange()); + const ids = new Set(); + const stridePx = rowStride / 4; + for (let y = 0; y < rectH; y++) { + const row = y * stridePx; + for (let x = 0; x < rectW; x++) { + const sample = view[row + x]; + if (sample === 0) continue; + const decoded = decodePickSample(sample); + if (decoded.kind === 'none') continue; + if (decoded.kind === 'point') { + ids.add(decoded.pointExpressId); + } else { + const mesh = meshes[decoded.meshIndexPlusOne - 1]; + if (mesh) ids.add(mesh.expressId); + } + } + } + readBuffer.unmap(); + readBuffer.destroy(); + return ids; + } + + /** + * Render the picker pass into `colorTexture` + `depthTexture` and + * return the still-open command encoder so the caller can append a + * `copyTextureToBuffer` for either a single texel (`pick`) or a + * whole rect (`pickRect`) before submitting. 
+ */ + private renderPickPass( + width: number, + height: number, + meshes: Mesh[], + viewProj: Float32Array, + pointNodes?: ReadonlyArray, + pointSizing?: PointPickSizing, + ): GPUCommandEncoder { if (this.colorTexture.width !== width || this.colorTexture.height !== height) { this.colorTexture.destroy(); this.depthTexture.destroy(); - this.colorTexture = this.device.createTexture({ size: { width, height }, format: 'r32uint', usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC, }); - this.depthTexture = this.device.createTexture({ size: { width, height }, format: 'depth32float', - usage: GPUTextureUsage.RENDER_ATTACHMENT, + // COPY_SRC so single-pixel pick can read depth back for the + // hover-XYZ unprojection. Rect pick doesn't sample depth but + // costs nothing to keep the flag set. + usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC, }); } - - // Recreate texture views each time to avoid reuse issues - // WebGPU texture views cannot be reused after being submitted + // WebGPU texture views can't be reused after submit, so build fresh ones. 
const colorView = this.colorTexture.createView(); const depthView = this.depthTexture.createView(); - // Render picker pass const encoder = this.device.createCommandEncoder(); const pass = encoder.beginRenderPass({ - colorAttachments: [ - { - view: colorView, - loadOp: 'clear', - clearValue: { r: 0, g: 0, b: 0, a: 0 }, - storeOp: 'store', - }, - ], + colorAttachments: [{ + view: colorView, + loadOp: 'clear', + clearValue: { r: 0, g: 0, b: 0, a: 0 }, + storeOp: 'store', + }], depthStencilAttachment: { view: depthView, depthClearValue: 0.0, // Reverse-Z: clear to 0.0 (far plane) @@ -210,119 +414,40 @@ export class Picker { }, }); - // Resize buffer if needed (safety net for very large models) if (meshes.length > this.maxMeshes) { this.resizeExpressIdBuffer(meshes.length); } - - // Upload viewProj matrix to uniform buffer (once for all meshes) this.device.queue.writeBuffer(this.uniformBuffer, 0, viewProj); - - // Build mesh index array (index + 1, so 0 = no hit) - // Using mesh index instead of expressId to properly support multi-model with overlapping expressIds const meshIndexArray = new Uint32Array(meshes.length); for (let i = 0; i < meshes.length; i++) { - if (meshes[i]) { - meshIndexArray[i] = i + 1; // +1 so 0 means no hit - } + if (meshes[i]) meshIndexArray[i] = i + 1; // +1 so 0 means no hit } this.device.queue.writeBuffer(this.expressIdBuffer, 0, meshIndexArray); pass.setPipeline(this.pipeline); pass.setBindGroup(0, this.bindGroup); - - // Draw each mesh with its index as the first instance - // The shader will use this instance_index to look up the expressId for (let i = 0; i < meshes.length; i++) { const mesh = meshes[i]; if (!mesh) continue; - pass.setVertexBuffer(0, mesh.vertexBuffer); pass.setIndexBuffer(mesh.indexBuffer, 'uint32'); - // Draw 1 instance, starting at instance i (so instance_index = i in shader) pass.drawIndexed(mesh.indexCount, 1, 0, 0, i); } - // Point splats share the depth buffer with the mesh pass so occlusion - // is correct: a 
triangle in front of a point hides the point and - // vice versa. Lazily instantiate the point pipeline — it costs a - // shader compile, no point spending it on IFC-only sessions. if (pointNodes && pointNodes.length > 0) { if (!this.pointPicker) { this.pointPicker = new PointPicker(this.webgpuDevice); } const sz = pointSizing ?? { sizeMode: 0, worldRadius: 0.02, pointSizePx: 4 }; - this.pointPicker.drawIntoPass( - pass, - pointNodes, - viewProj, - { width, height }, - { - sizeMode: sz.sizeMode, - worldRadius: sz.worldRadius, - pointSizePx: sz.pointSizePx, - clickTolerancePx: sz.clickTolerancePx ?? 2, - }, - ); + this.pointPicker.drawIntoPass(pass, pointNodes, viewProj, { width, height }, { + sizeMode: sz.sizeMode, + worldRadius: sz.worldRadius, + pointSizePx: sz.pointSizePx, + clickTolerancePx: sz.clickTolerancePx ?? 2, + }); } - pass.end(); - - // Read pixel at click position - // WebGPU requires bytesPerRow to be a multiple of 256 - const BYTES_PER_ROW = 256; - const readBuffer = this.device.createBuffer({ - size: BYTES_PER_ROW, - usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, - }); - - encoder.copyTextureToBuffer( - { - texture: this.colorTexture, - origin: { x: Math.floor(x), y: Math.floor(y), z: 0 }, - }, - { - buffer: readBuffer, - bytesPerRow: BYTES_PER_ROW, - rowsPerImage: 1, - }, - { width: 1, height: 1 } - ); - - this.device.queue.submit([encoder.finish()]); - // GPUMapMode.READ = 1 (WebGPU spec) - await readBuffer.mapAsync(1); // GPUMapMode.READ - const data = new Uint32Array(readBuffer.getMappedRange()); - const sample = data[0]; - readBuffer.unmap(); - readBuffer.destroy(); - - const decoded = decodePickSample(sample); - if (decoded.kind === 'none') return null; - - if (decoded.kind === 'point') { - // Look up the asset for modelIndex. expressId is already the - // federated globalId (vertex shader writes it from the per-point - // attribute, no lookup table needed). 
- const node = pointNodes?.find((n) => (n.expressId >>> 0) === decoded.pointExpressId); - return { - expressId: decoded.pointExpressId, - modelIndex: node?.modelIndex, - }; - } - - // Mesh hit — meshIndex is (actual index + 1), already validated > 0. - const mesh = meshes[decoded.meshIndexPlusOne - 1]; - if (!mesh) return null; - return { - expressId: mesh.expressId, - modelIndex: mesh.modelIndex, - }; - } - - updateUniforms(viewProj: Float32Array): void { - // Update viewProj matrix only - this.device.queue.writeBuffer(this.uniformBuffer, 0, viewProj); + return encoder; } /** diff --git a/packages/renderer/src/picking-manager.ts b/packages/renderer/src/picking-manager.ts index ce4442e18..2f1c34e94 100644 --- a/packages/renderer/src/picking-manager.ts +++ b/packages/renderer/src/picking-manager.ts @@ -217,4 +217,56 @@ export class PickingManager { ); return result; } + + /** + * GPU-based rectangle pick. Renders the same pick pass as `pick()`, + * then reads back every texel inside the rect and dedupes the hit + * set. Point splats and mesh triangles both participate. + * + * Rect coordinates are in CSS pixels; we scale to canvas pixels + * the same way `pick()` does. Visibility filters from `options` + * are applied to meshes before the pass; point nodes are not + * filtered (per-asset visibility is binary and the asset count is + * tiny). + * + * Limitations: skips the CPU-raycast and dynamic-mesh-creation + * fallbacks that `pick()` uses for very large batched models, so + * rect pick may miss entities whose individual meshes haven't been + * hydrated. Acceptable for an MVP — rect select is a power-user + * tool and the user can fall back to single-click pick. 
+ */ + async pickRect( + x0: number, + y0: number, + x1: number, + y1: number, + options?: PickOptions, + ): Promise<Set<number>> { + if (!this.picker) return new Set(); + const rect = this.canvas.getBoundingClientRect(); + if (rect.width === 0 || rect.height === 0) return new Set(); + const scaleX = this.canvas.width / rect.width; + const scaleY = this.canvas.height / rect.height; + const sx0 = x0 * scaleX, sy0 = y0 * scaleY; + const sx1 = x1 * scaleX, sy1 = y1 * scaleY; + if (options?.isStreaming) return new Set(); + + let meshes = this.scene.getMeshes(); + if (options?.hiddenIds && options.hiddenIds.size > 0) { + meshes = meshes.filter((m) => !options.hiddenIds!.has(m.expressId)); + } + if (options?.isolatedIds !== null && options?.isolatedIds !== undefined) { + meshes = meshes.filter((m) => options.isolatedIds!.has(m.expressId)); + } + const viewProj = this.camera.getViewProjMatrix().m; + const pointSnap = this.pointPickProvider?.() ?? null; + return this.picker.pickRect( + sx0, sy0, sx1, sy1, + this.canvas.width, this.canvas.height, + meshes, + viewProj, + pointSnap?.nodes ?? undefined, + pointSnap?.sizing ?? undefined, + ); + } } diff --git a/packages/renderer/src/pointcloud/point-cloud-node.ts b/packages/renderer/src/pointcloud/point-cloud-node.ts index 5bb4596f4..168929b16 100644 --- a/packages/renderer/src/pointcloud/point-cloud-node.ts +++ b/packages/renderer/src/pointcloud/point-cloud-node.ts @@ -16,6 +16,13 @@ import { POINT_VERTEX_BYTES } from './point-pipeline.js'; export interface PointCloudGpuChunk { vertexBuffer: GPUBuffer; + /** + * Per-point signed-distance buffer. Always allocated alongside the + * vertex buffer (4 bytes per point) so the compute pass and splat + * pipeline can both bind it without a "deviation present?" branch. + * Initialised to zeros — `Renderer.computeDeviations` overwrites. 
+ */ + deviationBuffer: GPUBuffer; pointCount: number; bbox: { min: [number, number, number]; max: [number, number, number] }; } @@ -70,6 +77,16 @@ export function createNode( }; } +/** + * Per-page-session counter for the vertex-buffer class-byte + * diagnostic. Mirrors the host-side log in `pointCloudIngest.ts` + * so the two can be cross-checked: if the host log shows non-zero + * classes but the vertex log shows all 0, the packing path is + * dropping them. + */ +const DEBUG_VERTEX_CLASS_LOG_LIMIT = 3; +let debugVertexClassLogs = 0; + /** Convert a renderer-agnostic chunk into a GPU vertex buffer + metadata. */ export function appendChunkToNode( device: GPUDevice, @@ -110,14 +127,45 @@ export function appendChunkToNode( u32[i * 6 + 5] = expressId; } + // Sanity-check the packed buffer: read back the class byte for + // the first few vertices so the console shows exactly what the + // splat shader will see at `rgbAndClass.a * 255`. Catches the + // case where the chunk had non-trivial classes but they got + // zeroed during packing (e.g. a buffer-view mismatch). + if (debugVertexClassLogs < DEBUG_VERTEX_CLASS_LOG_LIMIT && classes) { + debugVertexClassLogs++; + const sample: number[] = []; + for (let i = 0; i < Math.min(8, count); i++) { + sample.push(u8[i * POINT_VERTEX_BYTES + 15]); + } + console.log( + `[pointcloud-debug] vertex-buffer chunk #${debugVertexClassLogs}: ` + + `packed class bytes (offset +15) first8=[${sample.join(',')}]`, + ); + } + const vertexBuffer = device.createBuffer({ size: bytes.byteLength, - usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST, + // STORAGE so the deviation compute shader can read positions + // straight from the vertex buffer (avoids a duplicate copy). + usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE, }); device.queue.writeBuffer(vertexBuffer, 0, bytes); + // Pre-allocate the per-point deviation buffer (zero-initialised). 
+ // Bound as a vertex attribute by the splat pipeline AND as a + // storage buffer by the deviation compute pass. + const deviationBuffer = device.createBuffer({ + size: count * 4, + usage: GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST, + }); + // Zero-init explicitly — the WebGPU spec already guarantees zero-filled + // buffers at creation, but the explicit clear documents the contract. + device.queue.writeBuffer(deviationBuffer, 0, new Float32Array(count)); + const gpuChunk: PointCloudGpuChunk = { + vertexBuffer, + deviationBuffer, + pointCount: count, + bbox: chunk.bbox, + }; @@ -152,6 +200,7 @@ export function uploadAssetToGpu( export function destroyNode(node: PointCloudNode): void { for (const chunk of node.chunks) { chunk.vertexBuffer.destroy(); + chunk.deviationBuffer.destroy(); } node.uniformBuffer.destroy(); node.chunks = []; diff --git a/packages/renderer/src/pointcloud/point-cloud-renderer.ts b/packages/renderer/src/pointcloud/point-cloud-renderer.ts index aa9ec614f..eeff0517b 100644 --- a/packages/renderer/src/pointcloud/point-cloud-renderer.ts +++ b/packages/renderer/src/pointcloud/point-cloud-renderer.ts @@ -26,6 +26,11 @@ import { type PointCloudNode, type PointCloudNodeMeta, } from './point-cloud-node.js'; +import { + writePointCloudUniforms, + type PointColorMode, + type PointSizeMode, +} from './point-cloud-uniforms.js'; export interface ResolvedSectionPlane { normal: [number, number, number]; @@ -34,12 +39,7 @@ export interface ResolvedSectionPlane { flipped?: boolean; } -export type PointColorMode = - | 'rgb' - | 'classification' - | 'intensity' - | 'height' - | 'fixed'; +export type { PointColorMode, PointSizeMode }; /** * How to size a splat on screen. @@ -51,21 +51,6 @@ export type PointColorMode = * blow up to half the screen when you nose into the * cloud — usually the best default for nav. 
*/ -export type PointSizeMode = 'fixed-px' | 'adaptive-world' | 'attenuated'; - -const COLOR_MODE_INDEX: Record = { - rgb: 0, - classification: 1, - intensity: 2, - height: 3, - fixed: 4, -}; - -const SIZE_MODE_INDEX: Record = { - 'fixed-px': 0, - 'adaptive-world': 1, - 'attenuated': 2, -}; export interface PointCloudDrawState { /** column-major view-projection matrix (16 floats) */ @@ -91,6 +76,29 @@ export interface PointCloudRenderOptions { worldRadius?: number; /** Render splats as discs instead of squares. Defaults to true. */ roundShape?: boolean; + /** + * Per-ASPRS-class visibility bitmask. Bit `i` set → class `i` is + * visible. Defaults to `0xFFFFFFFF` (all 32 classes shown). Only + * affects points carrying classifications; meshes ignore it. + * Stored as an unsigned 32-bit integer in the uniform block. + */ + classMask?: number; + /** + * Stride-cull factor for the splat shader: 1 = render every point, + * 2 = every other, 4 = every fourth, etc. Used by the section-plane + * preview path so dragging a slider over a 100M-point scan stays + * responsive — UI flips this to e.g. 4 on drag start and back to 1 + * on drag end. Default 1. + */ + previewStride?: number; + /** + * BIM↔scan deviation heatmap range. `centerOffset` shifts the + * "white" point off zero (handy when a scan has a global offset + * from the model); `halfRange` is the metres mapped to ±1 on the + * blue→white→red ramp. Defaults to (0, 0.05) → ±5cm. + * Only consulted when `colorMode === 'deviation'`. 
+ */ + deviationRange?: { centerOffset: number; halfRange: number }; } export interface PointCloudAssetHandle { @@ -123,6 +131,9 @@ export class PointCloudRenderer { sizeMode: 'attenuated', worldRadius: 0.02, roundShape: true, + classMask: 0xFFFFFFFF, + previewStride: 1, + deviationRange: { centerOffset: 0, halfRange: 0.05 }, }; constructor( @@ -142,6 +153,23 @@ export class PointCloudRenderer { if (opts.sizeMode !== undefined) this.options.sizeMode = opts.sizeMode; if (opts.worldRadius !== undefined) this.options.worldRadius = opts.worldRadius; if (opts.roundShape !== undefined) this.options.roundShape = opts.roundShape; + if (opts.classMask !== undefined) this.options.classMask = opts.classMask >>> 0; + if (opts.previewStride !== undefined) { + // Clamp to a sane positive integer — stride 0 would divide by + // zero in the shader's modulo. >256 is silly but harmless. + const s = Math.max(1, Math.min(256, Math.floor(opts.previewStride) || 1)); + this.options.previewStride = s; + } + if (opts.deviationRange !== undefined) { + const r = opts.deviationRange; + this.options.deviationRange = { + centerOffset: Number.isFinite(r.centerOffset) ? r.centerOffset : 0, + // halfRange = 0 would divide by zero in the shader; clamp to + // a tiny positive value so dragging the slider to the floor + // doesn't NaN the colour. + halfRange: Number.isFinite(r.halfRange) && r.halfRange > 0 ? r.halfRange : 1e-6, + }; + } } getOptions(): Readonly> { @@ -244,6 +272,15 @@ export class PointCloudRenderer { return this.nodes.size; } + /** + * Iterate every uploaded node. Exposed so the deviation compute + * pass can reach each node's vertex + deviation buffers without + * the renderer having to mirror its internal map. + */ + getInternalNodes(): Iterable { + return this.nodes.values(); + } + /** Total number of points currently uploaded across all assets. 
*/ getPointCount(): number { let total = 0; @@ -310,78 +347,43 @@ export class PointCloudRenderer { const viewportH = Math.max(1, state.viewport?.height ?? 1); for (const node of this.nodes.values()) { - this.writeUniforms( + writePointCloudUniforms( + this.device, + this.uniformScratch, + this.uniformScratchU32, node, - state.viewProj, - normal, - distance, - enabled, - heightMin, - heightMax, - viewportW, - viewportH, + { + viewProj: state.viewProj, + fixedColor: this.options.fixedColor, + colorMode: this.options.colorMode, + sizeMode: this.options.sizeMode, + pointSize: this.options.pointSize, + worldRadius: this.options.worldRadius, + roundShape: this.options.roundShape, + sectionNormal: normal, + sectionDist: distance, + sectionEnabled: enabled, + heightMin, + heightMax, + viewportW, + viewportH, + classMask: this.options.classMask, + previewStride: this.options.previewStride, + deviationCenterOffset: this.options.deviationRange.centerOffset, + deviationHalfRange: this.options.deviationRange.halfRange, + }, ); pass.setBindGroup(0, node.bindGroup); for (const chunk of node.chunks) { pass.setVertexBuffer(0, chunk.vertexBuffer); + // 2nd buffer: per-point deviation float (location 4 in shader). + pass.setVertexBuffer(1, chunk.deviationBuffer); // Six verts per splat, one instance per source point. 
pass.draw(POINT_QUAD_VERTS, chunk.pointCount, 0, 0); } } } - private writeUniforms( - node: PointCloudNode, - viewProj: Float32Array, - sectionNormal: [number, number, number], - sectionDist: number, - sectionEnabled: boolean, - heightMin: number, - heightMax: number, - viewportW: number, - viewportH: number, - ): void { - const u = this.uniformScratch; - const uU32 = this.uniformScratchU32; - - // viewProj — floats 0..15 - u.set(viewProj.subarray(0, 16), 0); - // model — floats 16..31 (identity for now; per-asset transforms can be added later) - u.fill(0, 16, 32); - u[16] = 1; u[21] = 1; u[26] = 1; u[31] = 1; - // colorOverride — floats 32..35 - u[32] = this.options.fixedColor[0]; - u[33] = this.options.fixedColor[1]; - u[34] = this.options.fixedColor[2]; - u[35] = this.options.fixedColor[3]; - // colorModeAndExtras — floats 36..39 (mode, pointSize, heightMin, heightMax) - u[36] = COLOR_MODE_INDEX[this.options.colorMode]; - u[37] = this.options.pointSize; - u[38] = heightMin; - u[39] = heightMax; - // sizing — floats 40..43 (sizeMode, worldRadius, viewportW, viewportH) - u[40] = SIZE_MODE_INDEX[this.options.sizeMode]; - u[41] = this.options.worldRadius; - u[42] = viewportW; - u[43] = viewportH; - // sectionPlane — floats 44..47 - u[44] = sectionNormal[0]; - u[45] = sectionNormal[1]; - u[46] = sectionNormal[2]; - u[47] = sectionDist; - // flags (u32 view) — bytes 192..207 = u32 indices 48..51 - // flags.x = the asset's CURRENT expressId. The shader uses this - // when non-zero so the federation registry can relabel a streamed - // asset post-upload (its per-vertex entityId attribute is baked - // at upload and would otherwise stay at the synthetic local ID). - uU32[48] = node.meta.expressId >>> 0; - uU32[49] = sectionEnabled ? 1 : 0; - uU32[50] = this.options.roundShape ? 
1 : 0; - uU32[51] = 0; - - this.device.queue.writeBuffer(node.uniformBuffer, 0, u.buffer, u.byteOffset, POINT_UNIFORM_SIZE); - } - /** * Resolve a packed objectId rgba8 sample back to the asset that owns it. * Returns null when the sample doesn't match any asset's expressId. diff --git a/packages/renderer/src/pointcloud/point-cloud-uniforms.ts b/packages/renderer/src/pointcloud/point-cloud-uniforms.ts new file mode 100644 index 000000000..902f91867 --- /dev/null +++ b/packages/renderer/src/pointcloud/point-cloud-uniforms.ts @@ -0,0 +1,133 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +/** + * Uniform layout + writer for the point-cloud render pipeline. + * + * Extracted from `point-cloud-renderer.ts` to keep the orchestration + * class small. The layout matches `point-shader.wgsl.ts` byte-for-byte; + * any shader edit needs to come back here too. 
+ */ + +import { POINT_UNIFORM_SIZE } from './point-pipeline.js'; +import type { PointCloudNode } from './point-cloud-node.js'; + +export type PointColorMode = + | 'rgb' + | 'classification' + | 'intensity' + | 'height' + | 'fixed' + | 'deviation'; + +export type PointSizeMode = 'fixed-px' | 'adaptive-world' | 'attenuated'; + +export const COLOR_MODE_INDEX: Record<PointColorMode, number> = { + rgb: 0, + classification: 1, + intensity: 2, + height: 3, + fixed: 4, + deviation: 5, +}; + +export const SIZE_MODE_INDEX: Record<PointSizeMode, number> = { + 'fixed-px': 0, + 'adaptive-world': 1, + 'attenuated': 2, +}; + +export interface PointUniformInputs { + viewProj: Float32Array; + fixedColor: [number, number, number, number]; + colorMode: PointColorMode; + sizeMode: PointSizeMode; + pointSize: number; + worldRadius: number; + roundShape: boolean; + sectionNormal: [number, number, number]; + sectionDist: number; + sectionEnabled: boolean; + heightMin: number; + heightMax: number; + viewportW: number; + viewportH: number; + /** Per-ASPRS-class visibility bitmask (32 bits = LAS 1.4 classes). */ + classMask: number; + /** Preview stride — 1 = full density, N = render every Nth point. */ + previewStride: number; + /** BIM ↔ scan deviation heatmap range (metres). */ + deviationCenterOffset: number; + deviationHalfRange: number; +} + +/** + * Pack the per-asset point-cloud uniform block into `scratch` and copy + * it onto the GPU. The two scratch typed-arrays must alias the same + * underlying buffer so we can write floats and packed u32 flags in one + * pass. 
+ */ +export function writePointCloudUniforms( + device: GPUDevice, + scratch: Float32Array, + scratchU32: Uint32Array, + node: PointCloudNode, + inputs: PointUniformInputs, +): void { + const u = scratch; + const uU32 = scratchU32; + + // viewProj — floats 0..15 + u.set(inputs.viewProj.subarray(0, 16), 0); + // model — floats 16..31 (identity for now; per-asset transforms can be added later) + u.fill(0, 16, 32); + u[16] = 1; u[21] = 1; u[26] = 1; u[31] = 1; + // colorOverride — floats 32..35 + u[32] = inputs.fixedColor[0]; + u[33] = inputs.fixedColor[1]; + u[34] = inputs.fixedColor[2]; + u[35] = inputs.fixedColor[3]; + // colorModeAndExtras — floats 36..39 (mode, pointSize, heightMin, heightMax) + u[36] = COLOR_MODE_INDEX[inputs.colorMode]; + u[37] = inputs.pointSize; + u[38] = inputs.heightMin; + u[39] = inputs.heightMax; + // sizing — floats 40..43 (sizeMode, worldRadius, viewportW, viewportH) + u[40] = SIZE_MODE_INDEX[inputs.sizeMode]; + u[41] = inputs.worldRadius; + u[42] = inputs.viewportW; + u[43] = inputs.viewportH; + // sectionPlane — floats 44..47 + u[44] = inputs.sectionNormal[0]; + u[45] = inputs.sectionNormal[1]; + u[46] = inputs.sectionNormal[2]; + u[47] = inputs.sectionDist; + // flags (u32 view) — bytes 192..207 = u32 indices 48..51 + // flags.x = the asset's CURRENT expressId. The shader uses this + // when non-zero so the federation registry can relabel a streamed + // asset post-upload (its per-vertex entityId attribute is baked + // at upload and would otherwise stay at the synthetic local ID). + // flags.w (u32 slot 51) = ASPRS class-visibility mask. Bit i set → class i shown. + uU32[48] = node.meta.expressId >>> 0; + uU32[49] = inputs.sectionEnabled ? 1 : 0; + uU32[50] = inputs.roundShape ? 1 : 0; + uU32[51] = inputs.classMask >>> 0; + // extras (u32 slots 52..55) — extras.x = previewStride, yzw reserved. 
+ uU32[52] = inputs.previewStride >>> 0; + uU32[53] = 0; + uU32[54] = 0; + uU32[55] = 0; + // deviationRange (f32 slots 56..59) — center, halfRange, _, _. + u[56] = inputs.deviationCenterOffset; + u[57] = inputs.deviationHalfRange; + u[58] = 0; + u[59] = 0; + + // Pass the typed array directly — TypeScript widens `.buffer` to + // `ArrayBufferLike` here (vs. `ArrayBuffer` on a class field), which + // doesn't satisfy `writeBuffer`'s parameter type. Passing an element + // offset of 0 and a length of POINT_UNIFORM_SIZE / 4 is identical + // to the byteOffset/byteLength form on the buffer. + device.queue.writeBuffer(node.uniformBuffer, 0, u, 0, POINT_UNIFORM_SIZE / 4); +} diff --git a/packages/renderer/src/pointcloud/point-pipeline.ts b/packages/renderer/src/pointcloud/point-pipeline.ts index e9e079f69..11c490ccc 100644 --- a/packages/renderer/src/pointcloud/point-pipeline.ts +++ b/packages/renderer/src/pointcloud/point-pipeline.ts @@ -32,9 +32,15 @@ import { pointShaderSource } from './point-shader.wgsl.js'; * [36..39] colorModeAndExtras (mode, pointSizePx, heightMin, heightMax) * [40..43] sizing (sizeMode, worldRadius, viewportW, viewportH) * [44..47] sectionPlane (nx, ny, nz, distance) - * [48..51] flags (u32 view: x=unused, y=sectionEnabled, z=roundShape, w=unused) + * [48..51] flags (u32 view: x=expressId, y=sectionEnabled, z=roundShape, w=classMask) + * [52..55] extras (u32 view: x=previewStride, yzw=unused) + * [56..59] deviationRange (centerOffset, halfRange, _, _) */ -export const POINT_UNIFORM_SIZE = 208; +// 15 vec4 slots × 16 bytes = 240. Was 208 before extras (PR-G's +// stride cull) and deviationRange (PR-H's BIM↔scan heatmap) both +// claimed their own slots — keeping them separate avoids overloading +// the flags / colourOverride slots and stays std140-friendly. +export const POINT_UNIFORM_SIZE = 240; export const POINT_VERTEX_BYTES = 24; /** Number of vertices emitted per splat (two triangles forming a quad). 
*/ export const POINT_QUAD_VERTS = 6; @@ -85,6 +91,15 @@ export class PointRenderPipeline { { shaderLocation: 3, offset: 20, format: 'uint32' }, ], }, + { + // Per-point deviation float (BIM↔scan signed distance). + // Always present, zero when the user hasn't computed yet. + arrayStride: 4, + stepMode: 'instance', + attributes: [ + { shaderLocation: 4, offset: 0, format: 'float32' }, + ], + }, ], }, fragment: { diff --git a/packages/renderer/src/pointcloud/point-shader.wgsl.ts b/packages/renderer/src/pointcloud/point-shader.wgsl.ts index d63bb33ce..7318e7e45 100644 --- a/packages/renderer/src/pointcloud/point-shader.wgsl.ts +++ b/packages/renderer/src/pointcloud/point-shader.wgsl.ts @@ -33,8 +33,15 @@ export const pointShaderSource = ` sizing: vec4, sectionPlane: vec4, // x = assetExpressId (federation-aware globalId), y = sectionEnabled, - // z = roundShape, w = unused + // z = roundShape, w = ASPRS class-visibility bitmask (bit i → class i) flags: vec4, + // x = previewStride (1 = render every point, N = render every + // Nth instance — used by the section-plane drag preview path). + // yzw reserved for future per-frame state. + extras: vec4, + // x = deviation centerOffset (m), y = deviation halfRange (m). + // Used by colorMode 5 (BIM↔scan deviation heatmap). + deviationRange: vec4, } @binding(0) @group(0) var uniforms: PointUniforms; @@ -43,6 +50,11 @@ export const pointShaderSource = ` @location(1) rgbAndClass: vec4, // unorm8x4 → 0..1 each @location(2) intensityPacked: u32, // low 16 bits = intensity @location(3) entityId: u32, + // BIM↔scan signed distance, populated by the deviation compute + // pass. Zero when the user hasn't computed yet (or when no + // mesh is loaded). Bound from a separate vertex buffer so the + // existing 24-byte-per-point layout stays unchanged. 
+ @location(4) deviation: f32, } struct VertexOutput { @@ -76,6 +88,23 @@ export const pointShaderSource = ` } } + // Diverging blue → white → red ramp for the BIM↔scan deviation + // heatmap. t is in [-1, 1] where −1 = scan-far on the negative + // side of the surface, 0 = exactly on surface, +1 = scan-far on + // the positive (outward-normal) side. Negative side (typically + // "inside" / "before" the wall) is blue; positive ("outside" / + // "past" the wall) is red. + fn deviation_ramp(t: f32) -> vec3 { + let s = clamp(t, -1.0, 1.0); + if (s < 0.0) { + // Cool side: deep blue → white as |t| → 0. + let k = s + 1.0; // [-1..0] → [0..1] + return mix(vec3(0.10, 0.30, 0.85), vec3(0.95, 0.95, 0.95), k); + } + // Warm side: white → red as t → 1. + return mix(vec3(0.95, 0.95, 0.95), vec3(0.85, 0.20, 0.10), s); + } + fn height_ramp(t: f32) -> vec3 { let s = clamp(t, 0.0, 1.0); if (s < 0.25) { @@ -94,7 +123,27 @@ export const pointShaderSource = ` } @vertex - fn vs_main(input: VertexInput, @builtin(vertex_index) vId: u32) -> VertexOutput { + fn vs_main( + input: VertexInput, + @builtin(vertex_index) vId: u32, + @builtin(instance_index) iId: u32, + ) -> VertexOutput { + // Preview-density stride cull. UI sets extras.x to e.g. 4 + // while the user drags a section-plane slider so we render + // every 4th point and the drag stays responsive on huge scans. + // stride <= 1 is the no-op default. + let stride = max(1u, uniforms.extras.x); + if (stride > 1u && (iId % stride) != 0u) { + var skipped: VertexOutput; + // Push behind the near plane so the rasteriser drops it. 
+ skipped.position = vec4(0.0, 0.0, -2.0, 1.0); + skipped.color = vec4(0.0); + skipped.worldPos = vec3(0.0); + skipped.entityId = 0u; + skipped.quadUv = vec2(0.0); + return skipped; + } + // Quad corners (two triangles, CCW) in unit disc coords: // tri 1: (-1,-1)(1,-1)(1,1) // tri 2: (-1,-1)(1, 1)(-1,1) @@ -148,6 +197,24 @@ export const pointShaderSource = ` let mode = u32(uniforms.colorModeAndExtras.x); let intensity01 = f32(input.intensityPacked & 0xffffu) / 65535.0; let classId = u32(round(input.rgbAndClass.a * 255.0)); + + // Per-class visibility — flags.w is a 32-bit mask. Class ids + // outside 0..31 always show (the mask only covers ASPRS LAS 1.4 + // standard classes). Hidden classes get pushed behind the near + // plane via a degenerate clipPos so they're culled before + // rasterisation; cheaper than fragment-stage discard. + if (classId < 32u) { + let bit = (uniforms.flags.w >> classId) & 1u; + if (bit == 0u) { + var output: VertexOutput; + output.position = vec4(0.0, 0.0, -2.0, 1.0); // outside [0,1] reverse-Z → culled + output.color = vec4(0.0); + output.worldPos = vec3(0.0); + output.entityId = 0u; + output.quadUv = vec2(0.0); + return output; + } + } let heightT = (worldPos4.y - uniforms.colorModeAndExtras.z) / max(1e-6, uniforms.colorModeAndExtras.w - uniforms.colorModeAndExtras.z); @@ -159,6 +226,15 @@ export const pointShaderSource = ` case 2u: { rgb = vec3(intensity01, intensity01, intensity01); } case 3u: { rgb = height_ramp(heightT); } case 4u: { rgb = uniforms.colorOverride.rgb; } + case 5u: { + // Deviation: shift by centerOffset so a non-zero baseline + // can be re-zeroed (handy when a scan has a global offset + // from the model). halfRange = 0 falls through to white. 
+ let center = uniforms.deviationRange.x; + let half = max(uniforms.deviationRange.y, 1e-6); + let dt = (input.deviation - center) / half; + rgb = deviation_ramp(dt); + } default: { rgb = input.rgbAndClass.rgb; } } diff --git a/packages/renderer/src/scene.ts b/packages/renderer/src/scene.ts index 7462cc4ad..abf2017eb 100644 --- a/packages/renderer/src/scene.ts +++ b/packages/renderer/src/scene.ts @@ -334,6 +334,27 @@ export class Scene { * Get all MeshData pieces for an expressId (without merging). * Optionally filter by modelIndex for multi-model safety. */ + /** + * Iterate every CPU-side `MeshData` the scene holds — every piece + * for every expressId across every model. Used by the BIM ↔ scan + * deviation BVH builder which needs world-space triangle positions + * regardless of which IFC ingest path they came from. + * + * Deduplicates by `MeshData` identity: a colour-merged batch is + * stored under every contributor's expressId, and visiting it + * multiple times would double-count its triangles in the BVH. + */ + forEachMeshData(visit: (md: MeshData) => void): void { + const seen = new Set(); + for (const pieces of this.meshDataMap.values()) { + for (const piece of pieces) { + if (seen.has(piece)) continue; + seen.add(piece); + visit(piece); + } + } + } + getMeshDataPieces(expressId: number, modelIndex?: number): MeshData[] | undefined { let pieces = this.meshDataMap.get(expressId); if (!pieces || pieces.length === 0) return undefined; diff --git a/packages/renderer/src/types.ts b/packages/renderer/src/types.ts index e0e28e05b..5c1f0106a 100644 --- a/packages/renderer/src/types.ts +++ b/packages/renderer/src/types.ts @@ -229,4 +229,11 @@ export interface PickOptions { export interface PickResult { expressId: number; modelIndex?: number; // Index of the model this entity belongs to + /** + * World-space XYZ of the picked surface point. Optional because the + * pick path can skip depth readback for callers that only need the + * entityId (e.g. 
selection state). Recovered by sampling the pick + * pass's depth texture at the click position and unprojecting. + */ + worldXYZ?: { x: number; y: number; z: number }; }