Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -2562,11 +2562,12 @@ impl DocumentMessageHandler {
.icon("RenderModeOutline")
.tooltip_label("Render Mode: Outline")
.on_update(|_| DocumentMessage::SetRenderMode { render_mode: RenderMode::Outline }.into()),
// TODO: See issue #320
// RadioEntryData::new("PixelPreview")
// .icon("RenderModePixels")
// .tooltip_label("Render Mode: Pixel Preview")
// .on_update(|_| todo!()),
RadioEntryData::new("PixelPreview").icon("RenderModePixels").tooltip_label("Render Mode: Pixel Preview").on_update(|_| {
DocumentMessage::SetRenderMode {
render_mode: RenderMode::PixelPreview,
}
.into()
}),
RadioEntryData::new("SvgPreview")
.icon("RenderModeSvg")
.tooltip_label("Render Mode: SVG Preview")
Expand All @@ -2577,7 +2578,7 @@ impl DocumentMessageHandler {
if disabled {
for entry in &mut entries {
entry.tooltip_description = "
*Normal* and *Outline* render modes are not available in this browser. For compatibility, *SVG Preview* mode is active as a fallback.\n\
*Normal*, *Outline*, and *Pixel Preview* render modes are not available in this browser. For compatibility, *SVG Preview* mode is active as a fallback.\n\
\n\
This functionality requires WebGPU support. Check webgpu.org for browser implementation status.
"
Expand Down
4 changes: 2 additions & 2 deletions node-graph/libraries/vector-types/src/vector/style.rs
Original file line number Diff line number Diff line change
Expand Up @@ -666,8 +666,8 @@ pub enum RenderMode {
Normal = 0,
/// Render only the outlines of shapes at the current viewport resolution
Outline,
// /// Render with normal coloration at the document resolution, showing the pixels when the current viewport resolution is higher
// PixelPreview,
/// Render with normal coloration at the document export resolution; at zoom > 100% this shows individual export pixels upscaled with nearest-neighbor filtering
PixelPreview,
/// Render a preview of how the object would be exported as an SVG.
SvgPreview,
}
178 changes: 178 additions & 0 deletions node-graph/libraries/wgpu-executor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -154,11 +154,189 @@ impl WgpuExecutor {
Ok(())
}

/// Resample `source_texture` into a new texture of `target_size` using an affine transform.
/// For each output pixel `p`, the source texel coordinate is `source_transform * p + source_offset`.
/// `filter` selects interpolation: `Nearest` for sharp pixel boundaries (used by the Pixel Preview render mode),
/// `Linear` for smooth bilinear interpolation (used by tilted viewport compositing).
///
/// Builds a one-off blit shader/pipeline, records a single render pass, submits it, and returns the new output texture.
pub fn resample_texture(&self, source_texture: &wgpu::Texture, target_size: UVec2, source_transform: glam::Mat2, source_offset: glam::Vec2, filter: wgpu::FilterMode) -> Result<wgpu::Texture> {
let device = &self.context.device;
let queue = &self.context.queue;

// Destination texture: RENDER_ATTACHMENT so the blit pass can draw into it; COPY_SRC and
// TEXTURE_BINDING so later passes can read or composite the result.
let output_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("resample_output"),
size: wgpu::Extent3d {
width: target_size.x,
height: target_size.y,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: VELLO_SURFACE_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});

// Layout: mat2x2<f32> (4 floats = 16 bytes) + vec2<f32> (2 floats = 8 bytes) = 24 bytes
// Bytes are packed little-endian, column-major (x_axis then y_axis), matching the WGSL `Params` struct below.
let mut params_data = [0_u8; 24];
params_data[0..4].copy_from_slice(&source_transform.x_axis.x.to_le_bytes());
params_data[4..8].copy_from_slice(&source_transform.x_axis.y.to_le_bytes());
params_data[8..12].copy_from_slice(&source_transform.y_axis.x.to_le_bytes());
params_data[12..16].copy_from_slice(&source_transform.y_axis.y.to_le_bytes());
params_data[16..20].copy_from_slice(&source_offset.x.to_le_bytes());
params_data[20..24].copy_from_slice(&source_offset.y.to_le_bytes());
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("resample_params"),
size: 24,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
queue.write_buffer(&uniform_buf, 0, &params_data);
Comment on lines +180 to +194
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Manually constructing the params_data byte array and using a magic number for the size is error-prone. It would be safer and more readable to define a #[repr(C)] struct that matches the shader's uniform buffer layout and use bytemuck to safely cast it to a byte slice. This avoids manual byte manipulation and makes the code's intent clearer.

You would need to add bytemuck = { version = "1", features = ["derive"] } to your Cargo.toml and use bytemuck::{Pod, Zeroable};.

		#[repr(C)]
		#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
		struct Uniforms {
			transform: glam::Mat2,
			offset: glam::Vec2,
		}

		let params_data = *bytemuck::cast_ref(&Uniforms {
			transform: source_transform,
			offset: source_offset,
		});

		let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
			label: Some("resample_params"),
			size: params_data.len() as u64,
			usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
			mapped_at_creation: false,
		});
		queue.write_buffer(&uniform_buf, 0, &params_data);


// Both magnification and minification use the caller-selected filter; the remaining
// sampler fields keep wgpu's defaults.
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("resample_sampler"),
mag_filter: filter,
min_filter: filter,
..Default::default()
});

// Fullscreen-triangle blit: the vertex stage emits one oversized triangle covering all of clip
// space; the fragment stage maps each output pixel position through the affine transform into
// normalized source UVs and samples the source texture there.
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("resample_blit"),
source: wgpu::ShaderSource::Wgsl(
r#"
@vertex fn vs(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4<f32> {
var pos = array<vec2<f32>, 3>(vec2(-1., 3.), vec2(-1., -1.), vec2(3., -1.));
return vec4(pos[vi], 0., 1.);
}
@group(0) @binding(0) var src: texture_2d<f32>;
@group(0) @binding(1) var src_sampler: sampler;
struct Params { transform: mat2x2<f32>, offset: vec2<f32> }
@group(0) @binding(2) var<uniform> params: Params;
@fragment fn fs(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
let src_coord = params.transform * pos.xy + params.offset;
let uv = src_coord / vec2<f32>(textureDimensions(src));
return textureSample(src, src_sampler, uv);
}
"#
.into(),
),
});

// Bind group layout: binding 0 = source texture, 1 = sampler, 2 = transform/offset uniform.
// Everything is fragment-stage only; the vertex stage reads no resources.
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});

// NOTE(review): the shader module, bind group layout, and pipeline are rebuilt on every call —
// consider caching them on the executor if this path becomes hot.
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("resample_pipeline"),
layout: Some(&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
})),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs"),
buffers: &[],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs"),
targets: &[Some(wgpu::ColorTargetState {
format: VELLO_SURFACE_FORMAT,
blend: None,
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState::default(),
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});

let src_view = source_texture.create_view(&wgpu::TextureViewDescriptor::default());
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&src_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: uniform_buf.as_entire_binding(),
},
],
});

// Single render pass: clear to transparent, then one draw of the 3-vertex triangle touches
// every output pixel.
let out_view = output_texture.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("resample_blit") });
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: None,
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &out_view,
resolve_target: None,
depth_slice: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
pass.set_pipeline(&pipeline);
pass.set_bind_group(0, &bind_group, &[]);
pass.draw(0..3, 0..1);
}
queue.submit([encoder.finish()]);

Ok(output_texture)
}

/// Wrap an HTML canvas in a wgpu surface, forwarding the canvas's window id to the
/// shared surface-registration path (WASM builds only).
#[cfg(target_family = "wasm")]
pub fn create_surface(&self, canvas: graphene_application_io::WasmSurfaceHandle) -> Result<SurfaceHandle<Surface>> {
let target = wgpu::SurfaceTarget::Canvas(canvas.surface);
let raw_surface = self.context.instance.create_surface(target)?;
self.create_surface_inner(raw_surface, canvas.window_id)
}

#[cfg(not(target_family = "wasm"))]
pub fn create_surface(&self, window: SurfaceHandle<Window>) -> Result<SurfaceHandle<Surface>> {
let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Window(Box::new(window.surface)))?;
Expand Down
78 changes: 59 additions & 19 deletions node-graph/nodes/gstd/src/render_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,11 +378,23 @@ pub async fn render_output_cache<'a: 'n>(
let device_scale = render_params.scale;
let physical_scale = logical_scale * device_scale;

let viewport_bounds = footprint.viewport_bounds_in_local_space();
let viewport_bounds = AxisAlignedBbox {
start: viewport_bounds.start,
end: viewport_bounds.start + viewport_bounds.size() / device_scale,
};
// Compute the correct AABB of the viewport in document space by transforming all 4 corners.
// This handles tilted (rotated) viewports where only 2 corners would give an incorrect AABB.
let inverse = footprint.transform.inverse();
let logical_size = physical_resolution.as_dvec2() / device_scale;
let corners = [
inverse.transform_point2(DVec2::ZERO),
inverse.transform_point2(DVec2::new(logical_size.x, 0.)),
inverse.transform_point2(logical_size),
inverse.transform_point2(DVec2::new(0., logical_size.y)),
];
let doc_min = corners.iter().copied().reduce(|a, b| a.min(b)).unwrap();
let doc_max = corners.iter().copied().reduce(|a, b| a.max(b)).unwrap();
let viewport_bounds = AxisAlignedBbox { start: doc_min, end: doc_max };

// Detect if the viewport is tilted
let footprint_matrix = footprint.transform.matrix2;
let has_tilt = footprint_matrix.x_axis.y.abs() > 1e-10 || footprint_matrix.y_axis.x.abs() > 1e-10;

let cache_key = CacheKey::new(
render_params.render_mode as u64,
Expand Down Expand Up @@ -421,7 +433,7 @@ pub async fn render_output_cache<'a: 'n>(
}

let exec = editor_api.application_io.as_ref().unwrap().gpu_executor().unwrap();
let (output_texture, combined_metadata) = composite_cached_regions(&all_regions, &viewport_bounds, physical_resolution, logical_scale, physical_scale, exec);
let (output_texture, combined_metadata) = composite_cached_regions(&all_regions, &viewport_bounds, physical_resolution, logical_scale, physical_scale, exec, has_tilt, footprint.transform);

RenderOutput {
data: RenderOutputType::Texture(ImageTexture { texture: output_texture }),
Expand Down Expand Up @@ -485,23 +497,34 @@ where
}
}

#[allow(clippy::too_many_arguments)]
fn composite_cached_regions(
regions: &[CachedRegion],
viewport_bounds: &AxisAlignedBbox,
output_resolution: UVec2,
logical_scale: f64,
physical_scale: f64,
exec: &wgpu_executor::WgpuExecutor,
has_tilt: bool,
footprint_transform: glam::DAffine2,
) -> (wgpu::Texture, rendering::RenderMetadata) {
let device = &exec.context.device;
let queue = &exec.context.queue;
let device_scale = physical_scale / logical_scale;

// When tilted, we composite tiles into an intermediate document-space texture, then apply the viewport rotation via a bilinear sampling shader.
// When not tilted, we blit tiles directly into the output texture (no extra pass needed).
let composite_resolution = if has_tilt {
(viewport_bounds.size() * physical_scale).ceil().as_uvec2().max(UVec2::ONE)
} else {
output_resolution
};

// TODO: Use texture pool to reuse existing unused textures instead of allocating fresh ones every time
let output_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("viewport_output"),
let composite_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some(if has_tilt { "tilt_intermediate" } else { "viewport_output" }),
size: wgpu::Extent3d {
width: output_resolution.x,
height: output_resolution.y,
width: composite_resolution.x,
height: composite_resolution.y,
depth_or_array_layers: 1,
},
mip_level_count: 1,
Expand All @@ -516,7 +539,6 @@ fn composite_cached_regions(
let mut combined_metadata = rendering::RenderMetadata::default();

// Calculate viewport pixel offset using round() to match region boundary calculations
let device_scale = physical_scale / logical_scale;
let viewport_pixel_start = (viewport_bounds.start * physical_scale).round().as_ivec2();

for region in regions {
Expand All @@ -527,17 +549,17 @@ fn composite_cached_regions(
let offset_pixels = region_pixel_start - viewport_pixel_start;

let (src_x, dst_x, width) = if offset_pixels.x >= 0 {
(0, offset_pixels.x as u32, region.texture_size.x.min(output_resolution.x.saturating_sub(offset_pixels.x as u32)))
(0, offset_pixels.x as u32, region.texture_size.x.min(composite_resolution.x.saturating_sub(offset_pixels.x as u32)))
} else {
let skip = (-offset_pixels.x) as u32;
(skip, 0, region.texture_size.x.saturating_sub(skip).min(output_resolution.x))
(skip, 0, region.texture_size.x.saturating_sub(skip).min(composite_resolution.x))
};

let (src_y, dst_y, height) = if offset_pixels.y >= 0 {
(0, offset_pixels.y as u32, region.texture_size.y.min(output_resolution.y.saturating_sub(offset_pixels.y as u32)))
(0, offset_pixels.y as u32, region.texture_size.y.min(composite_resolution.y.saturating_sub(offset_pixels.y as u32)))
} else {
let skip = (-offset_pixels.y) as u32;
(skip, 0, region.texture_size.y.saturating_sub(skip).min(output_resolution.y))
(skip, 0, region.texture_size.y.saturating_sub(skip).min(composite_resolution.y))
};

if width > 0 && height > 0 {
Expand All @@ -549,7 +571,7 @@ fn composite_cached_regions(
aspect: wgpu::TextureAspect::All,
},
wgpu::TexelCopyTextureInfo {
texture: &output_texture,
texture: &composite_texture,
mip_level: 0,
origin: wgpu::Origin3d { x: dst_x, y: dst_y, z: 0 },
aspect: wgpu::TextureAspect::All,
Expand All @@ -564,11 +586,29 @@ fn composite_cached_regions(

// Transform metadata from document space to viewport logical pixels
let mut region_metadata = region.metadata.clone();
let document_to_viewport = glam::DAffine2::from_scale(DVec2::splat(logical_scale)) * glam::DAffine2::from_translation(-viewport_bounds.start);
let document_to_viewport = if has_tilt {
footprint_transform
} else {
glam::DAffine2::from_scale(DVec2::splat(logical_scale)) * glam::DAffine2::from_translation(-viewport_bounds.start)
};
region_metadata.apply_transform(document_to_viewport);
combined_metadata.merge(&region_metadata);
}

queue.submit([encoder.finish()]);
(output_texture, combined_metadata)

if has_tilt {
// Apply viewport rotation by sampling the intermediate document-space texture
// with a bilinear-filtered affine transform to produce the final viewport output.
// For each output pixel p: texel = inv.matrix2 * p * logical_scale + (inv.translation - viewport_bounds.start) * physical_scale
let inv = footprint_transform.inverse();
let source_transform = glam::Mat2::from_cols((inv.matrix2.x_axis * logical_scale).as_vec2(), (inv.matrix2.y_axis * logical_scale).as_vec2());
let source_offset = ((inv.translation - viewport_bounds.start) * physical_scale).as_vec2();
let output_texture = exec
.resample_texture(&composite_texture, output_resolution, source_transform, source_offset, wgpu::FilterMode::Linear)
.expect("Failed to apply viewport tilt rotation");
(output_texture, combined_metadata)
} else {
(composite_texture, combined_metadata)
}
}
Loading