diff --git a/apps/typegpu-docs/src/content/docs/fundamentals/buffers.mdx b/apps/typegpu-docs/src/content/docs/fundamentals/buffers.mdx index e4cc924e7..bd07896ea 100644 --- a/apps/typegpu-docs/src/content/docs/fundamentals/buffers.mdx +++ b/apps/typegpu-docs/src/content/docs/fundamentals/buffers.mdx @@ -216,6 +216,100 @@ If you pass an unmapped buffer, the data will be written to the buffer using `GPUQueue.writeBuffer`. If you passed your own buffer to the `root.createBuffer` function, you need to ensure it has the `GPUBufferUsage.COPY_DST` usage flag if you want to write to it using the `write` method. ::: +### Permissive vector inputs + +For vector fields and buffers, `.write()` accepts three equivalent forms — you don't need to construct a typed vec instance: + +| Input form | Example for `vec3f` | Notes | +|---|---|---| +| Typed vec instance | `d.vec3f(1, 2, 3)` | Allocates a TypeGPU wrapper object | +| Plain JS tuple | `[1, 2, 3]` | No TypeGPU wrapper allocated | +| TypedArray | `new Float32Array([1, 2, 3])` | No TypeGPU wrapper allocated | + +When the data already lives in tuples or typed arrays, skipping vec construction avoids allocating TypeGPU wrapper objects, which can reduce garbage-collector pressure in hot paths such as per-frame updates or simulation ticks. + +```ts twoslash +import tgpu from 'typegpu'; import * as d from 'typegpu/data'; +const root = await tgpu.init(); +// ---cut--- +const vecBuffer = root.createBuffer(d.vec3f); + +vecBuffer.write(d.vec3f(1, 2, 3)); // typed instance +vecBuffer.write([1, 2, 3]); // plain tuple +vecBuffer.write(new Float32Array([1, 2, 3])); // TypedArray +``` + +For **arrays of vectors**, a single flat TypedArray is also accepted. TypeGPU automatically handles the stride difference between the packed input and the padded GPU layout (e.g. `vec3f` occupies 16 bytes in a buffer, but only 3 floats in the TypedArray). 
When your data already lives in a flat TypedArray, this avoids both per-element wrapper allocation and array construction overhead: + +```ts twoslash +import tgpu from 'typegpu'; import * as d from 'typegpu/data'; +const root = await tgpu.init(); +// ---cut--- +const arrBuffer = root.createBuffer(d.arrayOf(d.vec3f, 2)); + +// Array of instances, tuples, or per-element TypedArrays - all equivalent: +arrBuffer.write([d.vec3f(1, 2, 3), d.vec3f(4, 5, 6)]); +arrBuffer.write([[1, 2, 3], [4, 5, 6]]); + +// Flat Float32Array - no per-element wrapper construction: +arrBuffer.write(new Float32Array([1, 2, 3, 4, 5, 6])); +``` + +The same rules apply to scalar arrays — a `Float32Array` (or `Int32Array` / `Uint32Array`) can be passed directly to a buffer of `arrayOf(d.f32, n)` (or `i32` / `u32`). + +### Writing a slice + +You can write a contiguous slice of data into a buffer using the optional second argument of `.write()`. +Pass the values to write along with `startOffset` — the byte position at which writing begins. + +:::tip +Use `d.memoryLayoutOf` to obtain the correct byte offset for a given schema element without having to manually calculate it. +::: + +```ts twoslash +import tgpu from 'typegpu'; import * as d from 'typegpu/data'; +const root = await tgpu.init(); +// ---cut--- +const schema = d.arrayOf(d.u32, 6); +const buffer = root.createBuffer(schema, [0, 1, 2, 0, 0, 0]); + +// Get the byte offset of element [3] +const layout = d.memoryLayoutOf(schema, (a) => a[3]); + +// Write [4, 5, 6] starting at element [3], leaving [0, 1, 2] untouched +buffer.write([4, 5, 6], { startOffset: layout.offset }); +const data = await buffer.read(); // will be [0, 1, 2, 4, 5, 6] +``` + +An optional `endOffset` specifies the byte offset at which writing stops entirely. +Combined with `startOffset` and `d.memoryLayoutOf`, this lets you write to a precise region of the buffer. + +:::note +Both offsets are **byte-based**. 
Any component whose byte position falls at or beyond `endOffset` is not written, which means offsets that do not align to schema element boundaries can result in partial elements being written. Use `d.memoryLayoutOf` to target whole elements safely. +::: + +```ts twoslash +import tgpu from 'typegpu'; import * as d from 'typegpu/data'; +const root = await tgpu.init(); +// ---cut--- +const schema = d.arrayOf(d.vec3u, 4); +const buffer = root.createBuffer(schema); + +// Get the byte offsets of element [1] (start) and element [2] (stop) +const startLayout = d.memoryLayoutOf(schema, (a) => a[1]); +const endLayout = d.memoryLayoutOf(schema, (a) => a[2]); + +// Write one vec3u at element [1], stopping before element [2] +buffer.write([d.vec3u(4, 5, 6)], { + startOffset: startLayout.offset, + endOffset: endLayout.offset, +}); +``` + +:::note +In this particular case the `writePartial` method described in the next section would be a more convenient option, but the `startOffset` and `endOffset` options are useful for writing bigger slices of data. +::: + ### Partial writes When you want to update only a subset of a buffer’s fields, you can use the `.writePartial(data)` method. This method updates only the fields provided in the `data` object and leaves the rest unchanged. 
diff --git a/packages/typegpu/src/core/buffer/buffer.ts b/packages/typegpu/src/core/buffer/buffer.ts index d14ccf1f2..44bf866b3 100644 --- a/packages/typegpu/src/core/buffer/buffer.ts +++ b/packages/typegpu/src/core/buffer/buffer.ts @@ -1,16 +1,17 @@ import { BufferReader, BufferWriter, getSystemEndianness } from 'typed-binary'; import { getCompiledWriterForSchema } from '../../data/compiledIO.ts'; import { readData, writeData } from '../../data/dataIO.ts'; -import type { AnyData } from '../../data/dataTypes.ts'; +import { isDisarray, type AnyData } from '../../data/dataTypes.ts'; import { getWriteInstructions } from '../../data/partialIO.ts'; import { sizeOf } from '../../data/sizeOf.ts'; import type { BaseData } from '../../data/wgslTypes.ts'; -import { isWgslData } from '../../data/wgslTypes.ts'; +import { isVec, isWgslArray, isWgslData } from '../../data/wgslTypes.ts'; import type { StorageFlag } from '../../extension.ts'; import type { TgpuNamable } from '../../shared/meta.ts'; import { getName, setName } from '../../shared/meta.ts'; import type { Infer, + InferInput, InferPartial, IsValidIndexSchema, IsValidStorageSchema, @@ -103,6 +104,11 @@ type InnerValidUsagesFor = { export type ValidUsagesFor = InnerValidUsagesFor['usage']; +export type BufferWriteOptions = { + startOffset?: number; + endOffset?: number; +}; + export interface TgpuBuffer extends TgpuNamable { readonly [$internal]: true; readonly resourceType: 'buffer'; @@ -131,7 +137,7 @@ export interface TgpuBuffer extends TgpuNamable { as>(usage: T): UsageTypeToBufferUsage[T]; compileWriter(): void; - write(data: Infer): void; + write(data: InferInput, options?: BufferWriteOptions): void; writePartial(data: InferPartial): void; clear(): void; copyFrom(srcBuffer: TgpuBuffer>): void; @@ -222,7 +228,7 @@ class TgpuBufferImpl implements TgpuBuffer { }); if (this.initial) { - this._writeToTarget(this._buffer.getMappedRange(), this.initial); + this._writeToTarget(this._buffer.getMappedRange(), this.initial 
as InferInput); this._buffer.unmap(); } } @@ -287,12 +293,21 @@ class TgpuBufferImpl implements TgpuBuffer { getCompiledWriterForSchema(this.dataType); } - private _writeToTarget(target: ArrayBuffer, data: Infer): void { + private _writeToTarget( + target: ArrayBuffer, + data: InferInput, + options?: BufferWriteOptions, + ): void { + const dataView = new DataView(target); + const isLittleEndian = endianness === 'little'; + const startOffset = options?.startOffset ?? 0; + const endOffset = options?.endOffset ?? target.byteLength; + const compiledWriter = getCompiledWriterForSchema(this.dataType); if (compiledWriter) { try { - compiledWriter(new DataView(target), 0, data, endianness === 'little'); + compiledWriter(dataView, startOffset, data, isLittleEndian, endOffset); return; } catch (error) { console.error( @@ -304,25 +319,52 @@ class TgpuBufferImpl implements TgpuBuffer { } } - writeData(new BufferWriter(target), this.dataType, data); + if ( + ArrayBuffer.isView(data) && + !(data instanceof DataView) && + (isWgslArray(this.dataType) || isDisarray(this.dataType)) && + isVec((this.dataType as { elementType?: unknown }).elementType) + ) { + throw new Error( + 'Flat TypedArray input for arrays of vectors requires the compiled writer. ' + + 'This environment does not allow eval - pass an array of vec instances or plain tuples instead.', + ); + } + + const writer = new BufferWriter(target); + writer.seekTo(startOffset); + writeData(writer, this.dataType, data as Infer); } - write(data: Infer): void { + write(data: InferInput, options?: BufferWriteOptions): void { const gpuBuffer = this.buffer; + const bufferSize = sizeOf(this.dataType); + const startOffset = options?.startOffset ?? 0; + const endOffset = options?.endOffset ?? 
bufferSize; + const size = endOffset - startOffset; + + if (startOffset < 0 || !Number.isInteger(startOffset)) { + throw new Error(`startOffset must be a non-negative integer, got ${startOffset}`); + } + if (endOffset < startOffset) { + throw new Error(`endOffset (${endOffset}) must be >= startOffset (${startOffset})`); + } + if (endOffset > bufferSize) { + throw new Error(`endOffset (${endOffset}) exceeds buffer size (${bufferSize})`); + } if (gpuBuffer.mapState === 'mapped') { const mapped = gpuBuffer.getMappedRange(); - this._writeToTarget(mapped, data); + this._writeToTarget(mapped, data, options); return; } - const size = sizeOf(this.dataType); if (!this._hostBuffer) { - this._hostBuffer = new ArrayBuffer(size); + this._hostBuffer = new ArrayBuffer(sizeOf(this.dataType)); } - this._writeToTarget(this._hostBuffer, data); - this.#device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size); + this._writeToTarget(this._hostBuffer, data, options); + this.#device.queue.writeBuffer(gpuBuffer, startOffset, this._hostBuffer, startOffset, size); } public writePartial(data: InferPartial): void { diff --git a/packages/typegpu/src/core/buffer/bufferShorthand.ts b/packages/typegpu/src/core/buffer/bufferShorthand.ts index 767c50b3c..3f375c086 100644 --- a/packages/typegpu/src/core/buffer/bufferShorthand.ts +++ b/packages/typegpu/src/core/buffer/bufferShorthand.ts @@ -2,10 +2,10 @@ import type { ResolvedSnippet } from '../../data/snippet.ts'; import type { BaseData } from '../../data/wgslTypes.ts'; import type { StorageFlag } from '../../extension.ts'; import { getName, setName, type TgpuNamable } from '../../shared/meta.ts'; -import type { Infer, InferGPU, InferPartial } from '../../shared/repr.ts'; +import type { Infer, InferGPU, InferInput, InferPartial } from '../../shared/repr.ts'; import { $getNameForward, $gpuValueOf, $internal, $resolve } from '../../shared/symbols.ts'; import type { ResolutionCtx, SelfResolvable } from '../../types.ts'; -import type { 
TgpuBuffer, UniformFlag } from './buffer.ts'; +import type { BufferWriteOptions, TgpuBuffer, UniformFlag } from './buffer.ts'; import type { TgpuBufferUsage } from './bufferUsage.ts'; // ---------- @@ -16,7 +16,7 @@ interface TgpuBufferShorthandBase extends TgpuNamable { readonly [$internal]: true; // Accessible on the CPU - write(data: Infer): void; + write(data: InferInput, options?: BufferWriteOptions): void; writePartial(data: InferPartial): void; read(): Promise>; // --- @@ -103,8 +103,8 @@ export class TgpuBufferShorthandImpl< return this; } - write(data: Infer): void { - this.buffer.write(data); + write(data: InferInput, options?: BufferWriteOptions): void { + this.buffer.write(data, options); } writePartial(data: InferPartial): void { diff --git a/packages/typegpu/src/data/compiledIO.ts b/packages/typegpu/src/data/compiledIO.ts index 398866340..73e25bfc0 100644 --- a/packages/typegpu/src/data/compiledIO.ts +++ b/packages/typegpu/src/data/compiledIO.ts @@ -1,5 +1,5 @@ import { roundUp } from '../mathUtils.ts'; -import type { Infer } from '../shared/repr.ts'; +import type { InferInput } from '../shared/repr.ts'; import { alignmentOf } from './alignmentOf.ts'; import { isDisarray, isUnstruct } from './dataTypes.ts'; import { offsetsForProps } from './offsets.ts'; @@ -19,7 +19,13 @@ export const EVAL_ALLOWED_IN_ENV: boolean = (() => { const compiledWriters = new WeakMap< wgsl.BaseData, - (output: DataView, offset: number, value: unknown, littleEndian?: boolean) => void + ( + output: DataView, + offset: number, + value: unknown, + littleEndian?: boolean, + endOffset?: number, + ) => void >(); const typeToPrimitive = { @@ -147,11 +153,12 @@ export function buildWriter( offsetExpr: string, valueExpr: string, depth = 0, + partial = false, ): string { const loopVar = ['i', 'j', 'k'][depth] || `i${depth}`; if (wgsl.isAtomic(node) || wgsl.isDecorated(node)) { - return buildWriter(node.inner, offsetExpr, valueExpr, depth); + return buildWriter(node.inner, offsetExpr, 
valueExpr, depth, partial); } if (wgsl.isWgslStruct(node) || isUnstruct(node)) { @@ -165,6 +172,7 @@ export function buildWriter( `(${offsetExpr} + ${propOffset.offset})`, `${valueExpr}.${key}`, depth, + partial, ); } return code; @@ -174,12 +182,43 @@ export function buildWriter( const elementSize = roundUp(sizeOf(node.elementType), alignmentOf(node)); let code = ''; + if (wgsl.isVec(node.elementType)) { + const N = node.elementType.componentCount; + const primitive = typeToPrimitive[node.elementType.type as keyof typeof typeToPrimitive]; + const componentSize = sizeOf(node.elementType.primitive); + const writeFunc = primitiveToWriteFunction[primitive]; + const taVar = `_ta${depth}`; + + code += `var ${taVar} = ArrayBuffer.isView(${valueExpr}) && !(${valueExpr} instanceof DataView);\n`; + code += `for (let ${loopVar} = 0; ${loopVar} < ${node.elementCount}; ${loopVar}++) {\n`; + if (partial) { + code += `if ((${offsetExpr} + ${loopVar} * ${elementSize}) >= endOffset) { break; }\n`; + } + for (let c = 0; c < N; c++) { + const byteOff = c * componentSize; + const access = `${taVar} ? 
${valueExpr}[${loopVar} * ${N} + ${c}] : ${valueExpr}[${loopVar}][${c}]`; + if (partial) { + code += `if ((${offsetExpr} + ${loopVar} * ${elementSize} + ${byteOff}) < endOffset) {\n`; + code += `output.${writeFunc}((${offsetExpr} + ${loopVar} * ${elementSize} + ${byteOff}), ${access}, littleEndian);\n`; + code += '}\n'; + } else { + code += `output.${writeFunc}((${offsetExpr} + ${loopVar} * ${elementSize} + ${byteOff}), ${access}, littleEndian);\n`; + } + } + code += '}\n'; + return code; + } + code += `for (let ${loopVar} = 0; ${loopVar} < ${node.elementCount}; ${loopVar}++) {\n`; + if (partial) { + code += `if ((${offsetExpr} + ${loopVar} * ${elementSize}) >= endOffset) return;\n`; + } code += buildWriter( node.elementType, `(${offsetExpr} + ${loopVar} * ${elementSize})`, `${valueExpr}[${loopVar}]`, depth + 1, + partial, ); code += '}\n'; @@ -192,14 +231,19 @@ export function buildWriter( } const primitive = typeToPrimitive[node.type]; + const componentSize = sizeOf(node.primitive); let code = ''; const writeFunc = primitiveToWriteFunction[primitive]; - const components = ['x', 'y', 'z', 'w']; for (let i = 0; i < node.componentCount; i++) { - code += `output.${writeFunc}((${offsetExpr} + ${i * 4}), ${valueExpr}.${ - components[i] - }, littleEndian);\n`; + const byteOff = i * componentSize; + if (partial) { + code += `if ((${offsetExpr} + ${byteOff}) < endOffset) {\n`; + code += `output.${writeFunc}((${offsetExpr} + ${byteOff}), ${valueExpr}[${i}], littleEndian);\n`; + code += '}\n'; + } else { + code += `output.${writeFunc}((${offsetExpr} + ${byteOff}), ${valueExpr}[${i}], littleEndian);\n`; + } } return code; } @@ -218,9 +262,17 @@ export function buildWriter( const rowIndex = idx % matSize; const byteOffset = colIndex * rowStride + rowIndex * 4; - code += `output.${writeFunc}((${offsetExpr} + ${byteOffset}), ${valueExpr}.columns[${colIndex}].${ - ['x', 'y', 'z', 'w'][rowIndex] - }, littleEndian);\n`; + if (partial) { + code += `if ((${offsetExpr} + 
${byteOffset}) < endOffset) {\n`; + code += `output.${writeFunc}((${offsetExpr} + ${byteOffset}), ${valueExpr}.columns[${colIndex}].${ + ['x', 'y', 'z', 'w'][rowIndex] + }, littleEndian);\n`; + code += '}\n'; + } else { + code += `output.${writeFunc}((${offsetExpr} + ${byteOffset}), ${valueExpr}.columns[${colIndex}].${ + ['x', 'y', 'z', 'w'][rowIndex] + }, littleEndian);\n`; + } } return code; @@ -231,6 +283,9 @@ export function buildWriter( if (formatName in specialPackedFormats) { const handler = specialPackedFormats[formatName as keyof typeof specialPackedFormats]; + if (partial) { + return `if ((${offsetExpr}) < endOffset) {\n${handler.generator(offsetExpr, valueExpr)}}\n`; + } return handler.generator(offsetExpr, valueExpr); } @@ -252,9 +307,14 @@ export function buildWriter( for (let idx = 0; idx < componentCount; idx++) { const accessor = componentCount === 1 ? valueExpr : `${valueExpr}.${components[idx]}`; const value = transform ? transform(accessor) : accessor; - code += `output.${writeFunc}((${offsetExpr} + ${ - idx * componentSize - }), ${value}, littleEndian);\n`; + const byteOff = idx * componentSize; + if (partial) { + code += `if ((${offsetExpr} + ${byteOff}) < endOffset) {\n`; + code += `output.${writeFunc}((${offsetExpr} + ${byteOff}), ${value}, littleEndian);\n`; + code += '}\n'; + } else { + code += `output.${writeFunc}((${offsetExpr} + ${byteOff}), ${value}, littleEndian);\n`; + } } return code; @@ -265,15 +325,24 @@ export function buildWriter( } const primitive = typeToPrimitive[node.type as keyof typeof typeToPrimitive]; - return `output.${ - primitiveToWriteFunction[primitive] - }(${offsetExpr}, ${valueExpr}, littleEndian);\n`; + if (partial) { + return `if ((${offsetExpr}) < endOffset) {\noutput.${ + primitiveToWriteFunction[primitive] + }(${offsetExpr}, ${valueExpr}, littleEndian);\n}\n`; + } + return `output.${primitiveToWriteFunction[primitive]}(${offsetExpr}, ${valueExpr}, littleEndian);\n`; } export function 
getCompiledWriterForSchema( schema: T, ): - | ((output: DataView, offset: number, value: Infer, littleEndian?: boolean) => void) + | (( + output: DataView, + offset: number, + value: InferInput, + littleEndian?: boolean, + endOffset?: number, + ) => void) | undefined { if (!EVAL_ALLOWED_IN_ENV) { console.warn('This environment does not allow eval - using default writer as fallback'); @@ -284,20 +353,31 @@ export function getCompiledWriterForSchema( return compiledWriters.get(schema) as ( output: DataView, offset: number, - value: Infer, + value: InferInput, littleEndian?: boolean, + endOffset?: number, ) => void; } try { - const body = buildWriter(schema, 'offset', 'value', 0); + const fullBody = buildWriter(schema, 'offset', 'value', 0, false); + const partialBody = buildWriter(schema, 'offset', 'value', 0, true); + const body = `if (offset > 0 || endOffset < output.byteLength) {\n${partialBody}} else {\n${fullBody}}\n`; // oxlint-disable-next-line typescript-eslint/no-implied-eval - const fn = new Function('output', 'offset', 'value', 'littleEndian=true', body) as ( + const fn = new Function( + 'output', + 'offset', + 'value', + 'littleEndian=true', + 'endOffset=output.byteLength', + body, + ) as ( output: DataView, offset: number, value: unknown, littleEndian?: boolean, + endOffset?: number, ) => void; compiledWriters.set(schema, fn); diff --git a/packages/typegpu/src/data/dataIO.ts b/packages/typegpu/src/data/dataIO.ts index a711bb481..cf8ae9348 100644 --- a/packages/typegpu/src/data/dataIO.ts +++ b/packages/typegpu/src/data/dataIO.ts @@ -66,23 +66,23 @@ const dataWriters = { }, vec2f(output, _, value: wgsl.v2f) { - output.writeFloat32(value.x); - output.writeFloat32(value.y); + output.writeFloat32(value[0]); + output.writeFloat32(value[1]); }, vec2h(output, _, value: wgsl.v2h) { - output.writeFloat16(value.x); - output.writeFloat16(value.y); + output.writeFloat16(value[0]); + output.writeFloat16(value[1]); }, vec2i(output, _, value: wgsl.v2i) { - 
output.writeInt32(value.x); - output.writeInt32(value.y); + output.writeInt32(value[0]); + output.writeInt32(value[1]); }, vec2u(output, _, value: wgsl.v2u) { - output.writeUint32(value.x); - output.writeUint32(value.y); + output.writeUint32(value[0]); + output.writeUint32(value[1]); }, 'vec2'() { @@ -90,27 +90,27 @@ const dataWriters = { }, vec3f(output, _, value: wgsl.v3f) { - output.writeFloat32(value.x); - output.writeFloat32(value.y); - output.writeFloat32(value.z); + output.writeFloat32(value[0]); + output.writeFloat32(value[1]); + output.writeFloat32(value[2]); }, vec3h(output, _, value: wgsl.v3h) { - output.writeFloat16(value.x); - output.writeFloat16(value.y); - output.writeFloat16(value.z); + output.writeFloat16(value[0]); + output.writeFloat16(value[1]); + output.writeFloat16(value[2]); }, vec3i(output, _, value: wgsl.v3i) { - output.writeInt32(value.x); - output.writeInt32(value.y); - output.writeInt32(value.z); + output.writeInt32(value[0]); + output.writeInt32(value[1]); + output.writeInt32(value[2]); }, vec3u(output, _, value: wgsl.v3u) { - output.writeUint32(value.x); - output.writeUint32(value.y); - output.writeUint32(value.z); + output.writeUint32(value[0]); + output.writeUint32(value[1]); + output.writeUint32(value[2]); }, 'vec3'() { @@ -118,31 +118,31 @@ const dataWriters = { }, vec4f(output, _, value: wgsl.v4f) { - output.writeFloat32(value.x); - output.writeFloat32(value.y); - output.writeFloat32(value.z); - output.writeFloat32(value.w); + output.writeFloat32(value[0]); + output.writeFloat32(value[1]); + output.writeFloat32(value[2]); + output.writeFloat32(value[3]); }, vec4h(output, _, value: wgsl.v4h) { - output.writeFloat16(value.x); - output.writeFloat16(value.y); - output.writeFloat16(value.z); - output.writeFloat16(value.w); + output.writeFloat16(value[0]); + output.writeFloat16(value[1]); + output.writeFloat16(value[2]); + output.writeFloat16(value[3]); }, vec4i(output, _, value: wgsl.v4i) { - output.writeInt32(value.x); - 
output.writeInt32(value.y); - output.writeInt32(value.z); - output.writeInt32(value.w); + output.writeInt32(value[0]); + output.writeInt32(value[1]); + output.writeInt32(value[2]); + output.writeInt32(value[3]); }, vec4u(output, _, value: wgsl.v4u) { - output.writeUint32(value.x); - output.writeUint32(value.y); - output.writeUint32(value.z); - output.writeUint32(value.w); + output.writeUint32(value[0]); + output.writeUint32(value[1]); + output.writeUint32(value[2]); + output.writeUint32(value[3]); }, 'vec4'() { diff --git a/packages/typegpu/src/data/index.ts b/packages/typegpu/src/data/index.ts index 3a302bc26..7b51b0467 100644 --- a/packages/typegpu/src/data/index.ts +++ b/packages/typegpu/src/data/index.ts @@ -231,4 +231,4 @@ export type { BuiltinVertexIndex, BuiltinWorkgroupId, } from '../builtin.ts'; -export type { Infer, InferGPU, InferPartial } from '../shared/repr.ts'; +export type { Infer, InferGPU, InferInput, InferPartial } from '../shared/repr.ts'; diff --git a/packages/typegpu/src/data/offsetUtils.ts b/packages/typegpu/src/data/offsetUtils.ts index 0c0c4039f..41ca39ccc 100644 --- a/packages/typegpu/src/data/offsetUtils.ts +++ b/packages/typegpu/src/data/offsetUtils.ts @@ -282,12 +282,12 @@ export interface PrimitiveOffsetInfo { * ``` * * @param schema - The data schema to analyze. - * @param accessor - Optional function that accesses a specific primitive within the schema. If omitted, uses the root offset (0). + * @param accessor - Optional function that accesses a specific element within the schema. If omitted, uses the root offset (0). * @returns An object containing the offset and contiguous byte information. */ export function memoryLayoutOf( schema: T, - accessor?: (proxy: Infer) => number, + accessor?: (proxy: Infer) => unknown, ): PrimitiveOffsetInfo { if (!accessor) { return { @@ -306,5 +306,7 @@ export function memoryLayoutOf( }; } - throw new Error('Invalid accessor result. 
Expected an offset proxy with markers.'); + throw new Error( + 'memoryLayoutOf: accessor did not return a schema element. Make sure the accessor navigates to a field or element of the schema (e.g. `(s) => s.position.x`).', + ); } diff --git a/packages/typegpu/src/data/wgslTypes.ts b/packages/typegpu/src/data/wgslTypes.ts index 989102228..96cced79c 100644 --- a/packages/typegpu/src/data/wgslTypes.ts +++ b/packages/typegpu/src/data/wgslTypes.ts @@ -8,6 +8,7 @@ import type { Infer, InferGPU, InferGPURecord, + InferInput, InferPartial, InferPartialRecord, InferRecord, @@ -19,6 +20,7 @@ import type { } from '../shared/repr.ts'; import type { $gpuRepr, + $inRepr, $invalidSchemaReason, $memIdent, $repr, @@ -49,6 +51,20 @@ export interface NumberArrayView { [Symbol.iterator]: () => Iterator; } +/** + * Maps a scalar or vector element schema to the corresponding TypedArray type. + * Used to determine which TypedArrays are accepted for array write operations. + */ +type TypedArrayFor = T extends Vec2f | Vec3f | Vec4f | F32 + ? Float32Array + : T extends Vec2h | Vec3h | Vec4h | F16 + ? Float16Array + : T extends Vec2i | Vec3i | Vec4i | I32 + ? Int32Array + : T extends Vec2u | Vec3u | Vec4u | U32 + ? Uint32Array + : never; + /** * Vector infix notation. 
* @@ -676,6 +692,7 @@ export interface Vec2f // Type-tokens, not available at runtime readonly [$repr]: v2f; + readonly [$inRepr]: v2f | [number, number] | Float32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -700,6 +717,7 @@ export interface Vec2h // Type-tokens, not available at runtime readonly [$repr]: v2h; + readonly [$inRepr]: v2h | [number, number] | Float16Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -724,6 +742,7 @@ export interface Vec2i // Type-tokens, not available at runtime readonly [$repr]: v2i; + readonly [$inRepr]: v2i | [number, number] | Int32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -748,6 +767,7 @@ export interface Vec2u // Type-tokens, not available at runtime readonly [$repr]: v2u; + readonly [$inRepr]: v2u | [number, number] | Uint32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -794,6 +814,7 @@ export interface Vec3f // Type-tokens, not available at runtime readonly [$repr]: v3f; + readonly [$inRepr]: v3f | [number, number, number] | Float32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -820,6 +841,7 @@ export interface Vec3h // Type-tokens, not available at runtime readonly [$repr]: v3h; + readonly [$inRepr]: v3h | [number, number, number] | Float16Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -846,6 +868,7 @@ export interface Vec3i // Type-tokens, not available at runtime readonly [$repr]: v3i; + readonly [$inRepr]: v3i | [number, number, number] | Int32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -872,6 
+895,7 @@ export interface Vec3u // Type-tokens, not available at runtime readonly [$repr]: v3u; + readonly [$inRepr]: v3u | [number, number, number] | Uint32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -927,6 +951,7 @@ export interface Vec4f // Type-tokens, not available at runtime readonly [$repr]: v4f; + readonly [$inRepr]: v4f | [number, number, number, number] | Float32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -957,6 +982,7 @@ export interface Vec4h // Type-tokens, not available at runtime readonly [$repr]: v4h; + readonly [$inRepr]: v4h | [number, number, number, number] | Float16Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -987,6 +1013,7 @@ export interface Vec4i // Type-tokens, not available at runtime readonly [$repr]: v4i; + readonly [$inRepr]: v4i | [number, number, number, number] | Int32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -1017,6 +1044,7 @@ export interface Vec4u // Type-tokens, not available at runtime readonly [$repr]: v4u; + readonly [$inRepr]: v4u | [number, number, number, number] | Uint32Array; readonly [$validStorageSchema]: true; readonly [$validUniformSchema]: true; readonly [$validVertexSchema]: true; @@ -1134,6 +1162,9 @@ export interface WgslArray extends Bas // Type-tokens, not available at runtime readonly [$repr]: Infer[]; + readonly [$inRepr]: TypedArrayFor extends never + ? 
InferInput[] + : InferInput[] | TypedArrayFor; readonly [$gpuRepr]: InferGPU[]; readonly [$reprPartial]: { idx: number; value: InferPartial }[] | undefined; readonly [$memIdent]: WgslArray>; diff --git a/packages/typegpu/src/shared/repr.ts b/packages/typegpu/src/shared/repr.ts index aa366f56b..7900e5b57 100644 --- a/packages/typegpu/src/shared/repr.ts +++ b/packages/typegpu/src/shared/repr.ts @@ -5,6 +5,7 @@ import type { U16, U32, WgslArray } from '../data/wgslTypes.ts'; import type { $gpuRepr, $gpuValueOf, + $inRepr, $invalidSchemaReason, $memIdent, $repr, @@ -27,6 +28,18 @@ import type { Default } from './utilityTypes.ts'; */ export type Infer = T extends { readonly [$repr]: infer TRepr } ? TRepr : T; +/** + * Extracts the inferred input (write-side) representation of a resource. + * More permissive than {@link Infer} — accepts plain arrays and TypedArrays + * for vector and array schemas. Falls back to {@link Infer} when $inRepr is not defined. + * + * @example + * type A = InferInput // => v3f | [number, number, number] | Float32Array + * type B = InferInput> // => (v3f | [number, number, number] | Float32Array)[] | Float32Array + * type C = InferInput // => number (same as Infer) + */ +export type InferInput = T extends { readonly [$inRepr]: infer TRepr } ? TRepr : Infer; + /** * Extracts a sparse/partial inferred representation of a resource. * Used by the `buffer.writePartial` API. diff --git a/packages/typegpu/src/shared/symbols.ts b/packages/typegpu/src/shared/symbols.ts index 7a6104fc4..505cc2da2 100644 --- a/packages/typegpu/src/shared/symbols.ts +++ b/packages/typegpu/src/shared/symbols.ts @@ -53,6 +53,11 @@ export const $gpuRepr = Symbol(`typegpu:${version}:$gpuRepr`); * If present, it shadows the value of `$repr` for use in partial IO. */ export const $reprPartial = Symbol(`typegpu:${version}:$reprPartial`); +/** + * Type token for the write-side (input) representation of a resource. + * More permissive than $repr. 
If absent, $repr is used as the fallback. + */ +export const $inRepr = Symbol(`typegpu:${version}:$inRepr`); /** * Type token holding schemas that are identical in memory layout. */ diff --git a/packages/typegpu/tests/buffer.test.ts b/packages/typegpu/tests/buffer.test.ts index 9c6335384..8a702b5f4 100644 --- a/packages/typegpu/tests/buffer.test.ts +++ b/packages/typegpu/tests/buffer.test.ts @@ -1,6 +1,7 @@ import { attest } from '@ark/attest'; -import { describe, expect, expectTypeOf } from 'vitest'; +import { describe, expect, expectTypeOf, vi } from 'vitest'; import * as d from '../src/data/index.ts'; +import { sizeOf } from '../src/data/sizeOf.ts'; import type { ValidateBufferSchema, ValidUsagesFor } from '../src/index.js'; import { getName } from '../src/shared/meta.ts'; import type { IsValidBufferSchema, IsValidUniformSchema } from '../src/shared/repr.ts'; @@ -112,6 +113,147 @@ describe('TgpuBuffer', () => { expect(mappedBuffer.unmap).not.toHaveBeenCalled(); }); + it('should write a scalar array chunk from startOffset through the end when endOffset is omitted', ({ + root, + device, + }) => { + const schema = d.arrayOf(d.u32, 6); + const buffer = root.createBuffer(schema); + const rawBuffer = root.unwrap(buffer); + const layout = d.memoryLayoutOf(schema, (a) => a[3]); + + buffer.write([4, 5, 6], { + startOffset: layout.offset, + }); + + expect(device.mock.queue.writeBuffer.mock.calls).toStrictEqual([ + [ + rawBuffer, + layout.offset, + expect.any(ArrayBuffer), + layout.offset, + sizeOf(schema) - layout.offset, + ], + ]); + + const uploadedBuffer = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Uint32Array(uploadedBuffer)]).toStrictEqual([0, 0, 0, 4, 5, 6]); + }); + + it('should write a padded array chunk from startOffset through the end when endOffset is omitted', ({ + root, + device, + }) => { + const schema = d.arrayOf(d.vec3u, 4); + const buffer = root.createBuffer(schema); + const rawBuffer = root.unwrap(buffer); + const 
layout = d.memoryLayoutOf(schema, (a) => a[1]?.x); + + buffer.write([d.vec3u(4, 5, 6), d.vec3u(7, 8, 9), d.vec3u(10, 11, 12)], { + startOffset: layout.offset, + }); + + expect(device.mock.queue.writeBuffer.mock.calls).toStrictEqual([ + [ + rawBuffer, + layout.offset, + expect.any(ArrayBuffer), + layout.offset, + sizeOf(schema) - layout.offset, + ], + ]); + + const uploadedBuffer = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Uint32Array(uploadedBuffer)]).toStrictEqual([ + 0, 0, 0, 0, 4, 5, 6, 0, 7, 8, 9, 0, 10, 11, 12, 0, + ]); + }); + + it('should write a single padded element when both startOffset and endOffset are provided', ({ + root, + device, + }) => { + const schema = d.arrayOf(d.vec3u, 4); + const buffer = root.createBuffer(schema); + const rawBuffer = root.unwrap(buffer); + const startLayout = d.memoryLayoutOf(schema, (a) => a[1]); + const endLayout = d.memoryLayoutOf(schema, (a) => a[2]); + + buffer.write([d.vec3u(4, 5, 6)], { + startOffset: startLayout.offset, + endOffset: endLayout.offset, + }); + + expect(device.mock.queue.writeBuffer.mock.calls).toStrictEqual([ + [ + rawBuffer, + startLayout.offset, + expect.any(ArrayBuffer), + startLayout.offset, + endLayout.offset - startLayout.offset, + ], + ]); + + const uploadedBuffer = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Uint32Array(uploadedBuffer)]).toStrictEqual([ + 0, 0, 0, 0, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }); + + it('should write a single padded element to a mapped buffer when both startOffset and endOffset are provided', ({ + root, + }) => { + const schema = d.arrayOf(d.vec3u, 4); + const mappedBuffer = root.device.createBuffer({ + size: sizeOf(schema), + usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST, + mappedAtCreation: true, + }); + const buffer = root.createBuffer(schema, mappedBuffer); + const startLayout = d.memoryLayoutOf(schema, (a) => a[1]); + const endLayout = d.memoryLayoutOf(schema, (a) => 
a[2]); + + buffer.write([d.vec3u(4, 5, 6)], { + startOffset: startLayout.offset, + endOffset: endLayout.offset, + }); + + const writtenBuffer = vi.mocked(mappedBuffer.getMappedRange).mock.results[0] + ?.value as ArrayBuffer; + expect([...new Uint32Array(writtenBuffer)]).toStrictEqual([ + 0, 0, 0, 0, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }); + + it('should throw on negative startOffset', ({ root }) => { + const buffer = root.createBuffer(d.arrayOf(d.u32, 4)); + expect(() => buffer.write([1], { startOffset: -4 })).toThrow( + 'startOffset must be a non-negative integer', + ); + }); + + it('should throw on non-integer startOffset', ({ root }) => { + const buffer = root.createBuffer(d.arrayOf(d.u32, 4)); + expect(() => buffer.write([1], { startOffset: 1.5 })).toThrow( + 'startOffset must be a non-negative integer', + ); + }); + + it('should throw when endOffset < startOffset', ({ root }) => { + const buffer = root.createBuffer(d.arrayOf(d.u32, 4)); + expect(() => buffer.write([1], { startOffset: 8, endOffset: 4 })).toThrow( + 'endOffset (4) must be >= startOffset (8)', + ); + }); + + it('should throw when endOffset exceeds buffer size', ({ root }) => { + const schema = d.arrayOf(d.u32, 4); + const buffer = root.createBuffer(schema); + expect(() => buffer.write([1], { endOffset: sizeOf(schema) + 4 })).toThrow( + `endOffset (${sizeOf(schema) + 4}) exceeds buffer size (${sizeOf(schema)})`, + ); + }); + it('should map a mappable buffer before reading', async ({ root }) => { const rawBuffer = root.device.createBuffer({ size: 12, @@ -558,6 +700,110 @@ describe('TgpuBuffer', () => { }); }); +describe('TgpuBuffer (InferInput)', () => { + it('should accept plain tuples and TypedArrays for vec schemas at the type level', ({ root }) => { + const vec3fBuf = root.createBuffer(d.vec3f); + const vec2iBuf = root.createBuffer(d.vec2i); + const arrBuf = root.createBuffer(d.arrayOf(d.vec3f, 2)); + const scalarArrBuf = root.createBuffer(d.arrayOf(d.f32, 3)); + + // vec3f write 
accepts: instance, tuple, or Float32Array + expectTypeOf(vec3fBuf.write) + .parameter(0) + .toEqualTypeOf<d.v3f | [number, number, number] | Float32Array>(); + + // vec2i write accepts: instance, tuple, or Int32Array + expectTypeOf(vec2iBuf.write) + .parameter(0) + .toEqualTypeOf<d.v2i | [number, number] | Int32Array>(); + + // arrayOf(vec3f) write accepts: array of element inputs OR flat Float32Array + expectTypeOf(arrBuf.write) + .parameter(0) + .toEqualTypeOf<(d.v3f | [number, number, number] | Float32Array)[] | Float32Array>(); + + // arrayOf(f32) write accepts: number array OR Float32Array + expectTypeOf(scalarArrBuf.write).parameter(0).toEqualTypeOf<number[] | Float32Array>(); + }); + + it('should write a vec3f from a plain tuple', ({ root, device }) => { + const buffer = root.createBuffer(d.vec3f); + + buffer.write([1, 2, 3]); + + const rawBuffer = root.unwrap(buffer); + const [uploadedBuffer] = device.mock.queue.writeBuffer.mock.calls[0] ?? []; + expect(uploadedBuffer).toBe(rawBuffer); + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Float32Array(data, 0, 3)]).toStrictEqual([1, 2, 3]); + }); + + it('should write a vec3f from a Float32Array', ({ root, device }) => { + const buffer = root.createBuffer(d.vec3f); + + buffer.write(new Float32Array([1, 2, 3])); + + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Float32Array(data, 0, 3)]).toStrictEqual([1, 2, 3]); + }); + + it('should write an array of vec3f from plain tuples', ({ root, device }) => { + const buffer = root.createBuffer(d.arrayOf(d.vec3f, 2)); + + buffer.write([ + [1, 2, 3], + [4, 5, 6], + ]); + + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + // GPU layout: each vec3f occupies 16 bytes (vec3 alignment = 16) + expect([...new Float32Array(data, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(data, 16, 3)]).toStrictEqual([4, 5, 6]); + }); + + it('should write an array of vec3f from a flat Float32Array with stride correction', ({ + root, + device, + }) => { + // Packed 
input: 3 floats per element; GPU layout: 4 floats per element (padding) + const buffer = root.createBuffer(d.arrayOf(d.vec3f, 2)); + + buffer.write(new Float32Array([1, 2, 3, 4, 5, 6])); + + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Float32Array(data, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(data, 16, 3)]).toStrictEqual([4, 5, 6]); + }); + + it('should write an array of vec3f where each element is a view into a shared ArrayBuffer', ({ + root, + device, + }) => { + const buffer = root.createBuffer(d.arrayOf(d.vec3f, 2)); + + const sharedBuffer = new ArrayBuffer(24); // 2 * 3 floats * 4 bytes + const v0 = new Float32Array(sharedBuffer, 0, 3); + const v1 = new Float32Array(sharedBuffer, 12, 3); + v0.set([1, 2, 3]); + v1.set([4, 5, 6]); + + buffer.write([v0, v1]); + + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Float32Array(data, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(data, 16, 3)]).toStrictEqual([4, 5, 6]); + }); + + it('should write an array of f32 scalars from a Float32Array', ({ root, device }) => { + const buffer = root.createBuffer(d.arrayOf(d.f32, 3)); + + buffer.write(new Float32Array([1.5, 2.5, 3.5])); + + const data = device.mock.queue.writeBuffer.mock.calls[0]?.[2] as ArrayBuffer; + expect([...new Float32Array(data)]).toStrictEqual([1.5, 2.5, 3.5]); + }); +}); + describe('IsValidUniformSchema', () => { it('treats booleans as invalid', () => { expectTypeOf<IsValidUniformSchema<d.Bool>>().toEqualTypeOf<false>(); diff --git a/packages/typegpu/tests/compiledIO.test.ts b/packages/typegpu/tests/compiledIO.test.ts index 530bb2eb7..1cc4b93e2 100644 --- a/packages/typegpu/tests/compiledIO.test.ts +++ b/packages/typegpu/tests/compiledIO.test.ts @@ -14,9 +14,9 @@ describe('buildWriter', () => { expect(writer).toMatchInlineSnapshot(` "output.setUint32((offset + 0), value.a, littleEndian); - output.setFloat32(((offset + 16) + 0), value.b.x, littleEndian); - 
output.setFloat32(((offset + 16) + 4), value.b.y, littleEndian); - output.setFloat32(((offset + 16) + 8), value.b.z, littleEndian); + output.setFloat32(((offset + 16) + 0), value.b[0], littleEndian); + output.setFloat32(((offset + 16) + 4), value.b[1], littleEndian); + output.setFloat32(((offset + 16) + 8), value.b[2], littleEndian); " `); }); @@ -31,10 +31,11 @@ describe('buildWriter', () => { expect(writer).toMatchInlineSnapshot(` "output.setUint32((offset + 0), value.a, littleEndian); + var _ta0 = ArrayBuffer.isView(value.b) && !(value.b instanceof DataView); for (let i = 0; i < 2; i++) { - output.setFloat32((((offset + 16) + i * 16) + 0), value.b[i].x, littleEndian); - output.setFloat32((((offset + 16) + i * 16) + 4), value.b[i].y, littleEndian); - output.setFloat32((((offset + 16) + i * 16) + 8), value.b[i].z, littleEndian); + output.setFloat32(((offset + 16) + i * 16 + 0), _ta0 ? value.b[i * 3 + 0] : value.b[i][0], littleEndian); + output.setFloat32(((offset + 16) + i * 16 + 4), _ta0 ? value.b[i * 3 + 1] : value.b[i][1], littleEndian); + output.setFloat32(((offset + 16) + i * 16 + 8), _ta0 ? 
value.b[i * 3 + 2] : value.b[i][2], littleEndian); } for (let i = 0; i < 3; i++) { output.setUint32(((offset + 48) + i * 4), value.c[i], littleEndian); @@ -54,9 +55,9 @@ describe('buildWriter', () => { expect(writer).toMatchInlineSnapshot(` "output.setUint32((offset + 0), value.a, littleEndian); - output.setFloat32((((offset + 16) + 0) + 0), value.b.d.x, littleEndian); - output.setFloat32((((offset + 16) + 0) + 4), value.b.d.y, littleEndian); - output.setFloat32((((offset + 16) + 0) + 8), value.b.d.z, littleEndian); + output.setFloat32((((offset + 16) + 0) + 0), value.b.d[0], littleEndian); + output.setFloat32((((offset + 16) + 0) + 4), value.b.d[1], littleEndian); + output.setFloat32((((offset + 16) + 0) + 8), value.b.d[2], littleEndian); for (let i = 0; i < 3; i++) { output.setUint32((((offset + 32) + i * 4) + 0), value.c[i].d, littleEndian); } @@ -69,10 +70,11 @@ describe('buildWriter', () => { const writer = buildWriter(array, 'offset', 'value'); expect(writer).toMatchInlineSnapshot(` - "for (let i = 0; i < 5; i++) { - output.setFloat32(((offset + i * 16) + 0), value[i].x, littleEndian); - output.setFloat32(((offset + i * 16) + 4), value[i].y, littleEndian); - output.setFloat32(((offset + i * 16) + 8), value[i].z, littleEndian); + "var _ta0 = ArrayBuffer.isView(value) && !(value instanceof DataView); + for (let i = 0; i < 5; i++) { + output.setFloat32((offset + i * 16 + 0), _ta0 ? value[i * 3 + 0] : value[i][0], littleEndian); + output.setFloat32((offset + i * 16 + 4), _ta0 ? value[i * 3 + 1] : value[i][1], littleEndian); + output.setFloat32((offset + i * 16 + 8), _ta0 ? 
value[i * 3 + 2] : value[i][2], littleEndian); } " `); @@ -104,9 +106,10 @@ describe('buildWriter', () => { expect(writer).toMatchInlineSnapshot(` "output.setUint32((offset + 0), value.a, littleEndian); for (let i = 0; i < 2; i++) { + var _ta1 = ArrayBuffer.isView(value.b[i]) && !(value.b[i] instanceof DataView); for (let j = 0; j < 2; j++) { - output.setFloat32(((((offset + 8) + i * 16) + j * 8) + 0), value.b[i][j].x, littleEndian); - output.setFloat32(((((offset + 8) + i * 16) + j * 8) + 4), value.b[i][j].y, littleEndian); + output.setFloat32((((offset + 8) + i * 16) + j * 8 + 0), _ta1 ? value.b[i][j * 2 + 0] : value.b[i][j][0], littleEndian); + output.setFloat32((((offset + 8) + i * 16) + j * 8 + 4), _ta1 ? value.b[i][j * 2 + 1] : value.b[i][j][1], littleEndian); } } " @@ -158,6 +161,123 @@ describe('buildWriter', () => { }); }); +describe('buildWriter (partial mode)', () => { + it('should compile a partial writer for a struct', () => { + const struct = d.struct({ + a: d.u32, + b: d.vec3f, + }); + const writer = buildWriter(struct, 'offset', 'value', 0, true); + + expect(writer).toMatchInlineSnapshot(` + "if (((offset + 0)) < endOffset) { + output.setUint32((offset + 0), value.a, littleEndian); + } + if (((offset + 16) + 0) < endOffset) { + output.setFloat32(((offset + 16) + 0), value.b[0], littleEndian); + } + if (((offset + 16) + 4) < endOffset) { + output.setFloat32(((offset + 16) + 4), value.b[1], littleEndian); + } + if (((offset + 16) + 8) < endOffset) { + output.setFloat32(((offset + 16) + 8), value.b[2], littleEndian); + } + " + `); + }); + + it('should compile a partial writer for a struct with an array', () => { + const struct = d.struct({ + a: d.u32, + b: d.arrayOf(d.vec3f, 2), + c: d.arrayOf(d.u32, 3), + }); + const writer = buildWriter(struct, 'offset', 'value', 0, true); + + expect(writer).toMatchInlineSnapshot(` + "if (((offset + 0)) < endOffset) { + output.setUint32((offset + 0), value.a, littleEndian); + } + var _ta0 = 
ArrayBuffer.isView(value.b) && !(value.b instanceof DataView); + for (let i = 0; i < 2; i++) { + if (((offset + 16) + i * 16) >= endOffset) { break; } + if (((offset + 16) + i * 16 + 0) < endOffset) { + output.setFloat32(((offset + 16) + i * 16 + 0), _ta0 ? value.b[i * 3 + 0] : value.b[i][0], littleEndian); + } + if (((offset + 16) + i * 16 + 4) < endOffset) { + output.setFloat32(((offset + 16) + i * 16 + 4), _ta0 ? value.b[i * 3 + 1] : value.b[i][1], littleEndian); + } + if (((offset + 16) + i * 16 + 8) < endOffset) { + output.setFloat32(((offset + 16) + i * 16 + 8), _ta0 ? value.b[i * 3 + 2] : value.b[i][2], littleEndian); + } + } + for (let i = 0; i < 3; i++) { + if (((offset + 48) + i * 4) >= endOffset) return; + if ((((offset + 48) + i * 4)) < endOffset) { + output.setUint32(((offset + 48) + i * 4), value.c[i], littleEndian); + } + } + " + `); + }); + + it('should compile a partial writer for an array of u16', () => { + const array = d.arrayOf(d.u16, 5); + + const builtWriter = buildWriter(array, 'offset', 'value', 0, true); + expect(builtWriter).toMatchInlineSnapshot(` + "for (let i = 0; i < 5; i++) { + if ((offset + i * 2) >= endOffset) return; + if (((offset + i * 2)) < endOffset) { + output.setUint16((offset + i * 2), value[i], littleEndian); + } + } + " + `); + }); + + it('should compile a partial writer for unstruct with loose data', () => { + const unstruct = d.unstruct({ + a: d.uint16x2, + b: d.unorm10_10_10_2, + c: d.uint8x2, + d: d.unorm8x4, + }); + + const unstructWriter = buildWriter(unstruct, 'offset', 'value', 0, true); + expect(unstructWriter).toMatchInlineSnapshot(` + "if (((offset + 0) + 0) < endOffset) { + output.setUint16(((offset + 0) + 0), value.a.x, littleEndian); + } + if (((offset + 0) + 2) < endOffset) { + output.setUint16(((offset + 0) + 2), value.a.y, littleEndian); + } + if (((offset + 4)) < endOffset) { + output.setUint32((offset + 4), 
((value.b.x*1023&0x3FF)<<22)|((value.b.y*1023&0x3FF)<<12)|((value.b.z*1023&0x3FF)<<2)|(value.b.w*3&3), littleEndian); + } + if (((offset + 8) + 0) < endOffset) { + output.setUint8(((offset + 8) + 0), value.c.x, littleEndian); + } + if (((offset + 8) + 1) < endOffset) { + output.setUint8(((offset + 8) + 1), value.c.y, littleEndian); + } + if (((offset + 10) + 0) < endOffset) { + output.setUint8(((offset + 10) + 0), Math.round(value.d.x * 255), littleEndian); + } + if (((offset + 10) + 1) < endOffset) { + output.setUint8(((offset + 10) + 1), Math.round(value.d.y * 255), littleEndian); + } + if (((offset + 10) + 2) < endOffset) { + output.setUint8(((offset + 10) + 2), Math.round(value.d.z * 255), littleEndian); + } + if (((offset + 10) + 3) < endOffset) { + output.setUint8(((offset + 10) + 3), Math.round(value.d.w * 255), littleEndian); + } + " + `); + }); +}); + describe('createCompileInstructions', () => { it('should compile a writer for a struct', () => { const struct = d.struct({ @@ -320,10 +440,11 @@ describe('createCompileInstructions', () => { "for (let i = 0; i < 2; i++) { for (let j = 0; j < 2; j++) { for (let k = 0; k < 2; k++) { + var _ta3 = ArrayBuffer.isView(value[i][j][k]) && !(value[i][j][k] instanceof DataView); for (let i3 = 0; i3 < 2; i3++) { - output.setFloat32((((((offset + i * 128) + j * 64) + k * 32) + i3 * 16) + 0), value[i][j][k][i3].x, littleEndian); - output.setFloat32((((((offset + i * 128) + j * 64) + k * 32) + i3 * 16) + 4), value[i][j][k][i3].y, littleEndian); - output.setFloat32((((((offset + i * 128) + j * 64) + k * 32) + i3 * 16) + 8), value[i][j][k][i3].z, littleEndian); + output.setFloat32(((((offset + i * 128) + j * 64) + k * 32) + i3 * 16 + 0), _ta3 ? value[i][j][k][i3 * 3 + 0] : value[i][j][k][i3][0], littleEndian); + output.setFloat32(((((offset + i * 128) + j * 64) + k * 32) + i3 * 16 + 4), _ta3 ? 
value[i][j][k][i3 * 3 + 1] : value[i][j][k][i3][1], littleEndian); + output.setFloat32(((((offset + i * 128) + j * 64) + k * 32) + i3 * 16 + 8), _ta3 ? value[i][j][k][i3 * 3 + 2] : value[i][j][k][i3][2], littleEndian); } } } @@ -362,6 +483,51 @@ describe('createCompileInstructions', () => { } }); + it('should stop writing elements at the given endOffset', () => { + const schema = d.arrayOf(d.vec3u, 4); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); + const dataView = new DataView(arr); + const endLayout = d.memoryLayoutOf(schema, (a) => a[2]); + + writer(dataView, 0, [d.vec3u(1, 2, 3), d.vec3u(4, 5, 6)], true, endLayout.offset); + + expect([...new Uint32Array(arr)]).toStrictEqual([ + 1, 2, 3, 0, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }); + + it('should write a padded array chunk from the beginning offset through the end when endOffset is omitted', () => { + const schema = d.arrayOf(d.vec3u, 4); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); + const dataView = new DataView(arr); + const layout = d.memoryLayoutOf(schema, (a) => a[1]?.x); + + writer( + dataView, + layout.offset, + [d.vec3u(4, 5, 6), d.vec3u(7, 8, 9), d.vec3u(10, 11, 12)], + true, + ); + + expect([...new Uint32Array(arr)]).toStrictEqual([ + 0, 0, 0, 0, 4, 5, 6, 0, 7, 8, 9, 0, 10, 11, 12, 0, + ]); + }); + + it('should write a scalar array chunk from the beginning offset through the end when endOffset is omitted', () => { + const schema = d.arrayOf(d.u32, 6); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); + const dataView = new DataView(arr); + const layout = d.memoryLayoutOf(schema, (a) => a[3]); + + writer(dataView, layout.offset, [4, 5, 6], true); + + expect([...new Uint32Array(arr)]).toStrictEqual([0, 0, 0, 4, 5, 6]); + }); + it('should compile a writer for a mat4x4f', () => { const Schema = d.struct({ transform: d.mat4x4f, @@ -418,14 
+584,6 @@ describe('createCompileInstructions', () => { it('should compile a writer for an array of u16', () => { const array = d.arrayOf(d.u16, 5); - const builtWriter = buildWriter(array, 'offset', 'value'); - expect(builtWriter).toMatchInlineSnapshot(` - "for (let i = 0; i < 5; i++) { - output.setUint16((offset + i * 2), value[i], littleEndian); - } - " - `); - const writer = getCompiledWriterForSchema(array)!; const arr = new ArrayBuffer(sizeOf(array)); @@ -500,13 +658,13 @@ describe('createCompileInstructions', () => { const builtWriter = buildWriter(unstruct, 'offset', 'value'); expect(builtWriter).toMatchInlineSnapshot(` - "output.setFloat32(((offset + 0) + 0), value.a.x, littleEndian); - output.setFloat32(((offset + 0) + 4), value.a.y, littleEndian); - output.setFloat32(((offset + 0) + 8), value.a.z, littleEndian); - output.setFloat32(((offset + 12) + 0), value.b.x, littleEndian); - output.setFloat32(((offset + 12) + 4), value.b.y, littleEndian); - output.setFloat32(((offset + 12) + 8), value.b.z, littleEndian); - output.setFloat32(((offset + 12) + 12), value.b.w, littleEndian); + "output.setFloat32(((offset + 0) + 0), value.a[0], littleEndian); + output.setFloat32(((offset + 0) + 4), value.a[1], littleEndian); + output.setFloat32(((offset + 0) + 8), value.a[2], littleEndian); + output.setFloat32(((offset + 12) + 0), value.b[0], littleEndian); + output.setFloat32(((offset + 12) + 4), value.b[1], littleEndian); + output.setFloat32(((offset + 12) + 8), value.b[2], littleEndian); + output.setFloat32(((offset + 12) + 12), value.b[3], littleEndian); " `); @@ -528,10 +686,11 @@ describe('createCompileInstructions', () => { const builtWriter = buildWriter(disarray, 'offset', 'value'); expect(builtWriter).toMatchInlineSnapshot(` - "for (let i = 0; i < 3; i++) { - output.setFloat32(((offset + i * 12) + 0), value[i].x, littleEndian); - output.setFloat32(((offset + i * 12) + 4), value[i].y, littleEndian); - output.setFloat32(((offset + i * 12) + 8), value[i].z, 
littleEndian); + "var _ta0 = ArrayBuffer.isView(value) && !(value instanceof DataView); + for (let i = 0; i < 3; i++) { + output.setFloat32((offset + i * 12 + 0), _ta0 ? value[i * 3 + 0] : value[i][0], littleEndian); + output.setFloat32((offset + i * 12 + 4), _ta0 ? value[i * 3 + 1] : value[i][1], littleEndian); + output.setFloat32((offset + i * 12 + 8), _ta0 ? value[i * 3 + 2] : value[i][2], littleEndian); } " `); @@ -546,6 +705,31 @@ describe('createCompileInstructions', () => { expect([...new Float32Array(arr)]).toStrictEqual([1, 2, 3, 4, 5, 6, 7, 8, 9]); }); + it('should compile a writer for a vec3h with 2-byte component offsets', () => { + const writer = buildWriter(d.vec3h, 'offset', 'value'); + + expect(writer).toMatchInlineSnapshot(` + "output.setFloat16((offset + 0), value[0], littleEndian); + output.setFloat16((offset + 2), value[1], littleEndian); + output.setFloat16((offset + 4), value[2], littleEndian); + " + `); + }); + + it('should compile a writer for an array of vec3h with 2-byte component offsets', () => { + const writer = buildWriter(d.arrayOf(d.vec3h, 2), 'offset', 'value'); + + expect(writer).toMatchInlineSnapshot(` + "var _ta0 = ArrayBuffer.isView(value) && !(value instanceof DataView); + for (let i = 0; i < 2; i++) { + output.setFloat16((offset + i * 8 + 0), _ta0 ? value[i * 3 + 0] : value[i][0], littleEndian); + output.setFloat16((offset + i * 8 + 2), _ta0 ? value[i * 3 + 1] : value[i][1], littleEndian); + output.setFloat16((offset + i * 8 + 4), _ta0 ? 
value[i * 3 + 2] : value[i][2], littleEndian); + } + " + `); + }); + it('should compile for a disarray of unstructs', () => { const unstruct = d.unstruct({ a: d.vec3f, @@ -556,13 +740,13 @@ describe('createCompileInstructions', () => { const builtWriter = buildWriter(disarray, 'offset', 'value'); expect(builtWriter).toMatchInlineSnapshot(` "for (let i = 0; i < 2; i++) { - output.setFloat32((((offset + i * 28) + 0) + 0), value[i].a.x, littleEndian); - output.setFloat32((((offset + i * 28) + 0) + 4), value[i].a.y, littleEndian); - output.setFloat32((((offset + i * 28) + 0) + 8), value[i].a.z, littleEndian); - output.setFloat32((((offset + i * 28) + 12) + 0), value[i].b.x, littleEndian); - output.setFloat32((((offset + i * 28) + 12) + 4), value[i].b.y, littleEndian); - output.setFloat32((((offset + i * 28) + 12) + 8), value[i].b.z, littleEndian); - output.setFloat32((((offset + i * 28) + 12) + 12), value[i].b.w, littleEndian); + output.setFloat32((((offset + i * 28) + 0) + 0), value[i].a[0], littleEndian); + output.setFloat32((((offset + i * 28) + 0) + 4), value[i].a[1], littleEndian); + output.setFloat32((((offset + i * 28) + 0) + 8), value[i].a[2], littleEndian); + output.setFloat32((((offset + i * 28) + 12) + 0), value[i].b[0], littleEndian); + output.setFloat32((((offset + i * 28) + 12) + 4), value[i].b[1], littleEndian); + output.setFloat32((((offset + i * 28) + 12) + 8), value[i].b[2], littleEndian); + output.setFloat32((((offset + i * 28) + 12) + 12), value[i].b[3], littleEndian); } " `); @@ -649,6 +833,179 @@ describe('createCompileInstructions', () => { expect(decoded.d.w).toBeCloseTo(inputData.d.w, 2); }); + // ---------- + // InferInput: plain tuples and TypedArrays as input + // ---------- + + it('should write a vec3f from a plain tuple', () => { + const schema = d.vec3f; + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(16); // vec3f is 12 bytes but align = 16 + const dataView = new DataView(arr); + + writer(dataView, 0, 
[1, 2, 3]); + + expect([...new Float32Array(arr, 0, 3)]).toStrictEqual([1, 2, 3]); + }); + + it('should write a vec3f from a Float32Array', () => { + const schema = d.vec3f; + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(16); + const dataView = new DataView(arr); + + writer(dataView, 0, new Float32Array([1, 2, 3])); + + expect([...new Float32Array(arr, 0, 3)]).toStrictEqual([1, 2, 3]); + }); + + it('should write an array of vec3f from plain tuples', () => { + const schema = d.arrayOf(d.vec3f, 3); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 3 * 16 = 48 bytes (with padding) + const dataView = new DataView(arr); + + writer(dataView, 0, [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ]); + + // GPU layout: each vec3f occupies 16 bytes (12 data + 4 padding) + expect([...new Float32Array(arr, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(arr, 16, 3)]).toStrictEqual([4, 5, 6]); + expect([...new Float32Array(arr, 32, 3)]).toStrictEqual([7, 8, 9]); + }); + + it('should write an array of vec3f from a flat Float32Array (stride correction: packed 3→padded 4)', () => { + // GPU layout: vec3f has 16-byte stride (12 bytes + 4 padding) + // Input: packed Float32Array with 3 floats per element (no padding) + const schema = d.arrayOf(d.vec3f, 3); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 48 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, new Float32Array([1, 2, 3, 4, 5, 6, 7, 8, 9])); + + expect([...new Float32Array(arr, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(arr, 16, 3)]).toStrictEqual([4, 5, 6]); + expect([...new Float32Array(arr, 32, 3)]).toStrictEqual([7, 8, 9]); + }); + + it('should write an array of vec4f from a flat Float32Array (no stride correction needed)', () => { + // GPU layout: vec4f has 16-byte stride (16 bytes, no padding) + // Input: Float32Array with 4 
floats per element = same layout + const schema = d.arrayOf(d.vec4f, 2); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 32 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, new Float32Array([1, 2, 3, 4, 5, 6, 7, 8])); + + expect([...new Float32Array(arr, 0, 4)]).toStrictEqual([1, 2, 3, 4]); + expect([...new Float32Array(arr, 16, 4)]).toStrictEqual([5, 6, 7, 8]); + }); + + it('should write an array of vec3u from a flat Uint32Array', () => { + const schema = d.arrayOf(d.vec3u, 2); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 2 * 16 = 32 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, new Uint32Array([10, 20, 30, 40, 50, 60])); + + expect([...new Uint32Array(arr, 0, 3)]).toStrictEqual([10, 20, 30]); + expect([...new Uint32Array(arr, 16, 3)]).toStrictEqual([40, 50, 60]); + }); + + it('should write an array of vec3f where each element is its own Float32Array', () => { + const schema = d.arrayOf(d.vec3f, 3); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 48 bytes + const dataView = new DataView(arr); + + // Three views into a single shared ArrayBuffer + const sharedBuffer = new ArrayBuffer(36); // 3 * 3 floats * 4 bytes + const v0 = new Float32Array(sharedBuffer, 0, 3); + const v1 = new Float32Array(sharedBuffer, 12, 3); + const v2 = new Float32Array(sharedBuffer, 24, 3); + v0.set([1, 2, 3]); + v1.set([4, 5, 6]); + v2.set([7, 8, 9]); + + writer(dataView, 0, [v0, v1, v2]); + + expect([...new Float32Array(arr, 0, 3)]).toStrictEqual([1, 2, 3]); + expect([...new Float32Array(arr, 16, 3)]).toStrictEqual([4, 5, 6]); + expect([...new Float32Array(arr, 32, 3)]).toStrictEqual([7, 8, 9]); + }); + + it('should write a vec3h from a plain tuple with correct 2-byte component offsets', () => { + const schema = d.vec3h; + const writer = getCompiledWriterForSchema(schema)!; + 
const arr = new ArrayBuffer(8); // vec3h alignment = 8 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, [1.5, 2.5, 3.5]); + + expect(dataView.getFloat16(0, true)).toBeCloseTo(1.5); + expect(dataView.getFloat16(2, true)).toBeCloseTo(2.5); + expect(dataView.getFloat16(4, true)).toBeCloseTo(3.5); + }); + + it('should write an arrayOf(vec2h) from plain tuples with correct 2-byte component offsets', () => { + const schema = d.arrayOf(d.vec2h, 3); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 3 * 4 = 12 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, [ + [1, 2], + [3, 4], + [5, 6], + ]); + + // Each vec2h: 2 components × 2 bytes = 4 bytes per element (no padding needed) + expect(dataView.getFloat16(0, true)).toBeCloseTo(1); + expect(dataView.getFloat16(2, true)).toBeCloseTo(2); + expect(dataView.getFloat16(4, true)).toBeCloseTo(3); + expect(dataView.getFloat16(6, true)).toBeCloseTo(4); + expect(dataView.getFloat16(8, true)).toBeCloseTo(5); + expect(dataView.getFloat16(10, true)).toBeCloseTo(6); + }); + + it('should write an arrayOf(vec3h) from plain tuples with stride-corrected 2-byte offsets', () => { + // vec3h: 6 bytes data, 8-byte stride (2 bytes padding per element) + const schema = d.arrayOf(d.vec3h, 2); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 2 * 8 = 16 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, [ + [1, 2, 3], + [4, 5, 6], + ]); + + // First element at byte 0, second at byte 8 + expect(dataView.getFloat16(0, true)).toBeCloseTo(1); + expect(dataView.getFloat16(2, true)).toBeCloseTo(2); + expect(dataView.getFloat16(4, true)).toBeCloseTo(3); + expect(dataView.getFloat16(8, true)).toBeCloseTo(4); + expect(dataView.getFloat16(10, true)).toBeCloseTo(5); + expect(dataView.getFloat16(12, true)).toBeCloseTo(6); + }); + + it('should write an array of f32 scalars from a Float32Array', () => { 
+ const schema = d.arrayOf(d.f32, 4); + const writer = getCompiledWriterForSchema(schema)!; + const arr = new ArrayBuffer(sizeOf(schema)); // 16 bytes + const dataView = new DataView(arr); + + writer(dataView, 0, new Float32Array([1.5, 2.5, 3.5, 4.5])); + + expect([...new Float32Array(arr)]).toStrictEqual([1.5, 2.5, 3.5, 4.5]); + }); + it('should work for disarrays of unstructs containing loose data', () => { const unstruct = d.unstruct({ a: d.unorm16x2, diff --git a/packages/typegpu/tests/computePipeline.test.ts b/packages/typegpu/tests/computePipeline.test.ts index 0c81b7009..b73b378b0 100644 --- a/packages/typegpu/tests/computePipeline.test.ts +++ b/packages/typegpu/tests/computePipeline.test.ts @@ -618,7 +618,7 @@ describe('TgpuComputePipeline', () => { const deepBuffer = root.createBuffer(DeepStruct).$usage('indirect'); pipeline.dispatchWorkgroupsIndirect( deepBuffer, - d.memoryLayoutOf(DeepStruct, (s) => s.someData[11] as number), + d.memoryLayoutOf(DeepStruct, (s) => s.someData[11]), ); expect(warnSpy.mock.calls[1]![0]).toMatchInlineSnapshot( @@ -627,7 +627,7 @@ describe('TgpuComputePipeline', () => { pipeline.dispatchWorkgroupsIndirect( deepBuffer, - d.memoryLayoutOf(DeepStruct, (s) => s.nested.innerNested[0]?.yy as number), + d.memoryLayoutOf(DeepStruct, (s) => s.nested.innerNested[0]?.yy), ); expect(warnSpy.mock.calls[2]![0]).toMatchInlineSnapshot( diff --git a/packages/typegpu/tests/offsetUtils.test.ts b/packages/typegpu/tests/offsetUtils.test.ts index 847fa0911..96c2409d5 100644 --- a/packages/typegpu/tests/offsetUtils.test.ts +++ b/packages/typegpu/tests/offsetUtils.test.ts @@ -43,7 +43,7 @@ describe('d.memoryLayoutOf (arrays)', () => { it('computes offsets for array elements without padding', () => { const Schema = d.arrayOf(d.u32, 6); - const info = d.memoryLayoutOf(Schema, (a) => a[3] as number); + const info = d.memoryLayoutOf(Schema, (a) => a[3]); expect(info.offset).toBe(12); expect(info.contiguous).toBe(12); @@ -52,7 +52,7 @@ 
describe('d.memoryLayoutOf (arrays)', () => { it('limits contiguous bytes to element size when array stride has padding', () => { const Schema = d.arrayOf(d.vec3u, 3); - const info = d.memoryLayoutOf(Schema, (a) => a[1]?.x as number); + const info = d.memoryLayoutOf(Schema, (a) => a[1]?.x); expect(info.offset).toBe(16); expect(info.contiguous).toBe(12); @@ -109,21 +109,21 @@ describe('d.memoryLayoutOf (nested layouts)', () => { }); it('tracks offsets and contiguous bytes within nested arrays', () => { - const info = d.memoryLayoutOf(DeepStruct, (s) => s.someData[11] as number); + const info = d.memoryLayoutOf(DeepStruct, (s) => s.someData[11]); expect(info.offset).toBe(44); expect(info.contiguous).toBe(8); }); it('tracks offsets for nested structs inside arrays', () => { - const info = d.memoryLayoutOf(DeepStruct, (s) => s.nested.innerNested[1]?.myVec.x as number); + const info = d.memoryLayoutOf(DeepStruct, (s) => s.nested.innerNested[1]?.myVec.x); expect(info.offset).toBe(128); expect(info.contiguous).toBe(28); }); it('tracks offsets inside a later struct run', () => { - const info = d.memoryLayoutOf(DeepStruct, (s) => s.nested.additionalData[1] as number); + const info = d.memoryLayoutOf(DeepStruct, (s) => s.nested.additionalData[1]); expect(info.offset).toBe(184); expect(info.contiguous).toBe(124); @@ -141,7 +141,7 @@ describe('d.memoryLayoutOf (edge cases)', () => { arr: d.arrayOf(E, 3), }); - const info = d.memoryLayoutOf(S, (s) => s.arr[1]?.vec.x as number); + const info = d.memoryLayoutOf(S, (s) => s.arr[1]?.vec.x); expect(info.offset).toBe(48); expect(info.contiguous).toBe(20); @@ -175,7 +175,7 @@ describe('d.memoryLayoutOf (edge cases)', () => { arr: d.arrayOf(E, 4), }); - const info = d.memoryLayoutOf(S, (s) => s.arr[1]?.x.x as number); + const info = d.memoryLayoutOf(S, (s) => s.arr[1]?.x.x); expect(info.offset).toBe(64); expect(info.contiguous).toBe(192); @@ -191,7 +191,7 @@ describe('d.memoryLayoutOf (edge cases)', () => { s: I, }); - const info = 
d.memoryLayoutOf(S, (s) => s.arr[0]?.y as number); + const info = d.memoryLayoutOf(S, (s) => s.arr[0]?.y); expect(info.offset).toBe(4); expect(info.contiguous).toBe(16);