diff --git a/editor/src/messages/portfolio/document/document_message_handler.rs b/editor/src/messages/portfolio/document/document_message_handler.rs index 76fbfa4828..a9c64077dc 100644 --- a/editor/src/messages/portfolio/document/document_message_handler.rs +++ b/editor/src/messages/portfolio/document/document_message_handler.rs @@ -2562,11 +2562,12 @@ impl DocumentMessageHandler { .icon("RenderModeOutline") .tooltip_label("Render Mode: Outline") .on_update(|_| DocumentMessage::SetRenderMode { render_mode: RenderMode::Outline }.into()), - // TODO: See issue #320 - // RadioEntryData::new("PixelPreview") - // .icon("RenderModePixels") - // .tooltip_label("Render Mode: Pixel Preview") - // .on_update(|_| todo!()), + RadioEntryData::new("PixelPreview").icon("RenderModePixels").tooltip_label("Render Mode: Pixel Preview").on_update(|_| { + DocumentMessage::SetRenderMode { + render_mode: RenderMode::PixelPreview, + } + .into() + }), RadioEntryData::new("SvgPreview") .icon("RenderModeSvg") .tooltip_label("Render Mode: SVG Preview") @@ -2577,7 +2578,7 @@ impl DocumentMessageHandler { if disabled { for entry in &mut entries { entry.tooltip_description = " - *Normal* and *Outline* render modes are not available in this browser. For compatibility, *SVG Preview* mode is active as a fallback.\n\ + *Normal*, *Outline*, and *Pixel Preview* render modes are not available in this browser. For compatibility, *SVG Preview* mode is active as a fallback.\n\ \n\ This functionality requires WebGPU support. Check webgpu.org for browser implementation status. 
" diff --git a/node-graph/libraries/vector-types/src/vector/style.rs b/node-graph/libraries/vector-types/src/vector/style.rs index dc584675b0..5a5ee49e89 100644 --- a/node-graph/libraries/vector-types/src/vector/style.rs +++ b/node-graph/libraries/vector-types/src/vector/style.rs @@ -666,8 +666,8 @@ pub enum RenderMode { Normal = 0, /// Render only the outlines of shapes at the current viewport resolution Outline, - // /// Render with normal coloration at the document resolution, showing the pixels when the current viewport resolution is higher - // PixelPreview, + /// Render with normal coloration at the document export resolution; at zoom > 100% this shows individual export pixels upscaled with nearest-neighbor filtering + PixelPreview, /// Render a preview of how the object would be exported as an SVG. SvgPreview, } diff --git a/node-graph/libraries/wgpu-executor/src/lib.rs b/node-graph/libraries/wgpu-executor/src/lib.rs index bdff32ee9f..8af25676a9 100644 --- a/node-graph/libraries/wgpu-executor/src/lib.rs +++ b/node-graph/libraries/wgpu-executor/src/lib.rs @@ -154,11 +154,189 @@ impl WgpuExecutor { Ok(()) } + /// Resample `source_texture` into a new texture of `target_size` using an affine transform. + /// For each output pixel `p`, the source texel coordinate is `source_transform * p + source_offset`. + /// `filter` selects interpolation: `Nearest` for sharp pixel boundaries (used by the Pixel Preview render mode), + /// `Linear` for smooth bilinear interpolation (used by tilted viewport compositing). 
+	pub fn resample_texture(&self, source_texture: &wgpu::Texture, target_size: UVec2, source_transform: glam::Mat2, source_offset: glam::Vec2, filter: wgpu::FilterMode) -> Result<wgpu::Texture> {
+		let device = &self.context.device;
+		let queue = &self.context.queue;
+
+		let output_texture = device.create_texture(&wgpu::TextureDescriptor {
+			label: Some("resample_output"),
+			size: wgpu::Extent3d {
+				width: target_size.x,
+				height: target_size.y,
+				depth_or_array_layers: 1,
+			},
+			mip_level_count: 1,
+			sample_count: 1,
+			dimension: wgpu::TextureDimension::D2,
+			format: VELLO_SURFACE_FORMAT,
+			usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::TEXTURE_BINDING,
+			view_formats: &[],
+		});
+
+		// Layout: mat2x2<f32> (4 floats = 16 bytes) + vec2<f32> (2 floats = 8 bytes) = 24 bytes
+		let mut params_data = [0_u8; 24];
+		params_data[0..4].copy_from_slice(&source_transform.x_axis.x.to_le_bytes());
+		params_data[4..8].copy_from_slice(&source_transform.x_axis.y.to_le_bytes());
+		params_data[8..12].copy_from_slice(&source_transform.y_axis.x.to_le_bytes());
+		params_data[12..16].copy_from_slice(&source_transform.y_axis.y.to_le_bytes());
+		params_data[16..20].copy_from_slice(&source_offset.x.to_le_bytes());
+		params_data[20..24].copy_from_slice(&source_offset.y.to_le_bytes());
+		let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
+			label: Some("resample_params"),
+			size: 24,
+			usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+			mapped_at_creation: false,
+		});
+		queue.write_buffer(&uniform_buf, 0, &params_data);
+
+		let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+			label: Some("resample_sampler"),
+			mag_filter: filter,
+			min_filter: filter,
+			..Default::default()
+		});
+
+		let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+			label: Some("resample_blit"),
+			source: wgpu::ShaderSource::Wgsl(
+				r#"
+				@vertex fn vs(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4<f32> {
+					var pos = array<vec2<f32>, 3>(vec2(-1., 3.), vec2(-1., -1.), vec2(3., -1.));
+					return vec4(pos[vi], 0., 1.);
+				}
+				@group(0) @binding(0) var src: texture_2d<f32>;
+				@group(0) @binding(1) var src_sampler: sampler;
+				struct Params { transform: mat2x2<f32>, offset: vec2<f32> }
+				@group(0) @binding(2) var<uniform> params: Params;
+				@fragment fn fs(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
+					let src_coord = params.transform * pos.xy + params.offset;
+					let uv = src_coord / vec2<f32>(textureDimensions(src));
+					return textureSample(src, src_sampler, uv);
+				}
+				"#
+				.into(),
+			),
+		});
+
+		let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+			label: None,
+			entries: &[
+				wgpu::BindGroupLayoutEntry {
+					binding: 0,
+					visibility: wgpu::ShaderStages::FRAGMENT,
+					ty: wgpu::BindingType::Texture {
+						sample_type: wgpu::TextureSampleType::Float { filterable: true },
+						view_dimension: wgpu::TextureViewDimension::D2,
+						multisampled: false,
+					},
+					count: None,
+				},
+				wgpu::BindGroupLayoutEntry {
+					binding: 1,
+					visibility: wgpu::ShaderStages::FRAGMENT,
+					ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+					count: None,
+				},
+				wgpu::BindGroupLayoutEntry {
+					binding: 2,
+					visibility: wgpu::ShaderStages::FRAGMENT,
+					ty: wgpu::BindingType::Buffer {
+						ty: wgpu::BufferBindingType::Uniform,
+						has_dynamic_offset: false,
+						min_binding_size: None,
+					},
+					count: None,
+				},
+			],
+		});
+
+		let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+			label: Some("resample_pipeline"),
+			layout: Some(&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+				label: None,
+				bind_group_layouts: &[&bind_group_layout],
+				push_constant_ranges: &[],
+			})),
+			vertex: wgpu::VertexState {
+				module: &shader,
+				entry_point: Some("vs"),
+				buffers: &[],
+				compilation_options: Default::default(),
+			},
+			fragment: Some(wgpu::FragmentState {
+				module: &shader,
+				entry_point: Some("fs"),
+				targets: &[Some(wgpu::ColorTargetState {
+					format: VELLO_SURFACE_FORMAT,
+					blend: None,
write_mask: wgpu::ColorWrites::ALL, + })], + compilation_options: Default::default(), + }), + primitive: wgpu::PrimitiveState::default(), + depth_stencil: None, + multisample: wgpu::MultisampleState::default(), + multiview: None, + cache: None, + }); + + let src_view = source_texture.create_view(&wgpu::TextureViewDescriptor::default()); + let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&src_view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&sampler), + }, + wgpu::BindGroupEntry { + binding: 2, + resource: uniform_buf.as_entire_binding(), + }, + ], + }); + + let out_view = output_texture.create_view(&wgpu::TextureViewDescriptor::default()); + let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("resample_blit") }); + { + let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: None, + color_attachments: &[Some(wgpu::RenderPassColorAttachment { + view: &out_view, + resolve_target: None, + depth_slice: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT), + store: wgpu::StoreOp::Store, + }, + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + pass.set_pipeline(&pipeline); + pass.set_bind_group(0, &bind_group, &[]); + pass.draw(0..3, 0..1); + } + queue.submit([encoder.finish()]); + + Ok(output_texture) + } + #[cfg(target_family = "wasm")] pub fn create_surface(&self, canvas: graphene_application_io::WasmSurfaceHandle) -> Result> { let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Canvas(canvas.surface))?; self.create_surface_inner(surface, canvas.window_id) } + #[cfg(not(target_family = "wasm"))] pub fn create_surface(&self, window: SurfaceHandle) -> Result> { let surface = 
self.context.instance.create_surface(wgpu::SurfaceTarget::Window(Box::new(window.surface)))?; diff --git a/node-graph/nodes/gstd/src/render_cache.rs b/node-graph/nodes/gstd/src/render_cache.rs index 022f577ee4..3dc6667e2c 100644 --- a/node-graph/nodes/gstd/src/render_cache.rs +++ b/node-graph/nodes/gstd/src/render_cache.rs @@ -378,11 +378,23 @@ pub async fn render_output_cache<'a: 'n>( let device_scale = render_params.scale; let physical_scale = logical_scale * device_scale; - let viewport_bounds = footprint.viewport_bounds_in_local_space(); - let viewport_bounds = AxisAlignedBbox { - start: viewport_bounds.start, - end: viewport_bounds.start + viewport_bounds.size() / device_scale, - }; + // Compute the correct AABB of the viewport in document space by transforming all 4 corners. + // This handles tilted (rotated) viewports where only 2 corners would give an incorrect AABB. + let inverse = footprint.transform.inverse(); + let logical_size = physical_resolution.as_dvec2() / device_scale; + let corners = [ + inverse.transform_point2(DVec2::ZERO), + inverse.transform_point2(DVec2::new(logical_size.x, 0.)), + inverse.transform_point2(logical_size), + inverse.transform_point2(DVec2::new(0., logical_size.y)), + ]; + let doc_min = corners.iter().copied().reduce(|a, b| a.min(b)).unwrap(); + let doc_max = corners.iter().copied().reduce(|a, b| a.max(b)).unwrap(); + let viewport_bounds = AxisAlignedBbox { start: doc_min, end: doc_max }; + + // Detect if the viewport is tilted + let footprint_matrix = footprint.transform.matrix2; + let has_tilt = footprint_matrix.x_axis.y.abs() > 1e-10 || footprint_matrix.y_axis.x.abs() > 1e-10; let cache_key = CacheKey::new( render_params.render_mode as u64, @@ -421,7 +433,7 @@ pub async fn render_output_cache<'a: 'n>( } let exec = editor_api.application_io.as_ref().unwrap().gpu_executor().unwrap(); - let (output_texture, combined_metadata) = composite_cached_regions(&all_regions, &viewport_bounds, physical_resolution, logical_scale, 
physical_scale, exec); + let (output_texture, combined_metadata) = composite_cached_regions(&all_regions, &viewport_bounds, physical_resolution, logical_scale, physical_scale, exec, has_tilt, footprint.transform); RenderOutput { data: RenderOutputType::Texture(ImageTexture { texture: output_texture }), @@ -485,6 +497,7 @@ where } } +#[allow(clippy::too_many_arguments)] fn composite_cached_regions( regions: &[CachedRegion], viewport_bounds: &AxisAlignedBbox, @@ -492,16 +505,26 @@ fn composite_cached_regions( logical_scale: f64, physical_scale: f64, exec: &wgpu_executor::WgpuExecutor, + has_tilt: bool, + footprint_transform: glam::DAffine2, ) -> (wgpu::Texture, rendering::RenderMetadata) { let device = &exec.context.device; let queue = &exec.context.queue; + let device_scale = physical_scale / logical_scale; + + // When tilted, we composite tiles into an intermediate document-space texture, then apply the viewport rotation via a bilinear sampling shader. + // When not tilted, we blit tiles directly into the output texture (no extra pass needed). 
+ let composite_resolution = if has_tilt { + (viewport_bounds.size() * physical_scale).ceil().as_uvec2().max(UVec2::ONE) + } else { + output_resolution + }; - // TODO: Use texture pool to reuse existing unused textures instead of allocating fresh ones every time - let output_texture = device.create_texture(&wgpu::TextureDescriptor { - label: Some("viewport_output"), + let composite_texture = device.create_texture(&wgpu::TextureDescriptor { + label: Some(if has_tilt { "tilt_intermediate" } else { "viewport_output" }), size: wgpu::Extent3d { - width: output_resolution.x, - height: output_resolution.y, + width: composite_resolution.x, + height: composite_resolution.y, depth_or_array_layers: 1, }, mip_level_count: 1, @@ -516,7 +539,6 @@ fn composite_cached_regions( let mut combined_metadata = rendering::RenderMetadata::default(); // Calculate viewport pixel offset using round() to match region boundary calculations - let device_scale = physical_scale / logical_scale; let viewport_pixel_start = (viewport_bounds.start * physical_scale).round().as_ivec2(); for region in regions { @@ -527,17 +549,17 @@ fn composite_cached_regions( let offset_pixels = region_pixel_start - viewport_pixel_start; let (src_x, dst_x, width) = if offset_pixels.x >= 0 { - (0, offset_pixels.x as u32, region.texture_size.x.min(output_resolution.x.saturating_sub(offset_pixels.x as u32))) + (0, offset_pixels.x as u32, region.texture_size.x.min(composite_resolution.x.saturating_sub(offset_pixels.x as u32))) } else { let skip = (-offset_pixels.x) as u32; - (skip, 0, region.texture_size.x.saturating_sub(skip).min(output_resolution.x)) + (skip, 0, region.texture_size.x.saturating_sub(skip).min(composite_resolution.x)) }; let (src_y, dst_y, height) = if offset_pixels.y >= 0 { - (0, offset_pixels.y as u32, region.texture_size.y.min(output_resolution.y.saturating_sub(offset_pixels.y as u32))) + (0, offset_pixels.y as u32, region.texture_size.y.min(composite_resolution.y.saturating_sub(offset_pixels.y as 
u32))) } else { let skip = (-offset_pixels.y) as u32; - (skip, 0, region.texture_size.y.saturating_sub(skip).min(output_resolution.y)) + (skip, 0, region.texture_size.y.saturating_sub(skip).min(composite_resolution.y)) }; if width > 0 && height > 0 { @@ -549,7 +571,7 @@ fn composite_cached_regions( aspect: wgpu::TextureAspect::All, }, wgpu::TexelCopyTextureInfo { - texture: &output_texture, + texture: &composite_texture, mip_level: 0, origin: wgpu::Origin3d { x: dst_x, y: dst_y, z: 0 }, aspect: wgpu::TextureAspect::All, @@ -564,11 +586,29 @@ fn composite_cached_regions( // Transform metadata from document space to viewport logical pixels let mut region_metadata = region.metadata.clone(); - let document_to_viewport = glam::DAffine2::from_scale(DVec2::splat(logical_scale)) * glam::DAffine2::from_translation(-viewport_bounds.start); + let document_to_viewport = if has_tilt { + footprint_transform + } else { + glam::DAffine2::from_scale(DVec2::splat(logical_scale)) * glam::DAffine2::from_translation(-viewport_bounds.start) + }; region_metadata.apply_transform(document_to_viewport); combined_metadata.merge(®ion_metadata); } queue.submit([encoder.finish()]); - (output_texture, combined_metadata) + + if has_tilt { + // Apply viewport rotation by sampling the intermediate document-space texture + // with a bilinear-filtered affine transform to produce the final viewport output. 
+ // For each output pixel p: texel = inv.matrix2 * p * logical_scale + (inv.translation - viewport_bounds.start) * physical_scale + let inv = footprint_transform.inverse(); + let source_transform = glam::Mat2::from_cols((inv.matrix2.x_axis * logical_scale).as_vec2(), (inv.matrix2.y_axis * logical_scale).as_vec2()); + let source_offset = ((inv.translation - viewport_bounds.start) * physical_scale).as_vec2(); + let output_texture = exec + .resample_texture(&composite_texture, output_resolution, source_transform, source_offset, wgpu::FilterMode::Linear) + .expect("Failed to apply viewport tilt rotation"); + (output_texture, combined_metadata) + } else { + (composite_texture, combined_metadata) + } } diff --git a/node-graph/nodes/gstd/src/render_node.rs b/node-graph/nodes/gstd/src/render_node.rs index 735cb4af4a..3972b961e6 100644 --- a/node-graph/nodes/gstd/src/render_node.rs +++ b/node-graph/nodes/gstd/src/render_node.rs @@ -1,7 +1,8 @@ use core_types::table::Table; -use core_types::transform::Footprint; +use core_types::transform::{Footprint, Transform}; use core_types::{CloneVarArgs, ExtractAll, ExtractVarArgs}; use core_types::{Color, Context, Ctx, ExtractFootprint, OwnedContextImpl, WasmNotSend}; +use glam::UVec2; use graph_craft::document::value::RenderOutput; pub use graph_craft::document::value::RenderOutputType; pub use graph_craft::wasm_application_io::*; @@ -16,6 +17,7 @@ use rendering::{RenderMetadata, SvgSegment}; use std::collections::HashMap; use std::sync::Arc; use vector_types::GradientStops; +use vector_types::vector::style::RenderMode; use wgpu_executor::RenderContext; // Re-export render_output_cache from render_cache module @@ -185,8 +187,7 @@ async fn render<'a: 'n>(ctx: impl Ctx + ExtractFootprint + ExtractVarArgs, edito // We now replace all transforms which are supposed to be infinite with a transform which covers the entire viewport // See for more detail let scaled_infinite_transform = 
vello::kurbo::Affine::scale_non_uniform(physical_resolution.x as f64, physical_resolution.y as f64); - let encoding = scene.encoding_mut(); - for transform in encoding.transforms.iter_mut() { + for transform in scene.encoding_mut().transforms.iter_mut() { if transform.matrix[0] == f32::INFINITY { *transform = vello_encoding::Transform::from_kurbo(&scaled_infinite_transform); } @@ -198,6 +199,76 @@ async fn render<'a: 'n>(ctx: impl Ctx + ExtractFootprint + ExtractVarArgs, edito None }; + // Pixel Preview: render at the 100%-scale in document space, then upscale with nearest-neighbor filtering. + // This shows the actual pixels of the exported image at viewport zoom levels above 100%. + // When the viewport is tilted, the upscale applies the rotation so individual document pixels + // appear as tilted squares, while the document-space render itself stays pixel-aligned (no rotation). + if render_params.render_mode == RenderMode::PixelPreview { + let logical_zoom = footprint.decompose_scale().x; + if logical_zoom > 1. { + let inv = footprint.transform.inverse(); + + // Compute the viewport's axis-aligned bounding box in document space. + // When tilted, the visible area is a rotated rectangle in document space; + // the AABB of that rectangle determines the document render region. + let corners = [ + glam::DVec2::ZERO, + glam::DVec2::new(logical_resolution.x, 0.), + glam::DVec2::new(logical_resolution.x, logical_resolution.y), + glam::DVec2::new(0., logical_resolution.y), + ]; + let doc_corners = corners.map(|c| inv.transform_point2(c)); + let doc_min = doc_corners.iter().copied().reduce(|a, b| a.min(b)).unwrap(); + let doc_max = doc_corners.iter().copied().reduce(|a, b| a.max(b)).unwrap(); + + // Snap the document render origin to integer document pixels for stable antialiasing. + // This ensures paths always sit at exact document-pixel-aligned positions in the 100%-scale render, + // so the antialiasing pattern doesn't flicker as you zoom or pan slightly. 
+ let snapped_doc_origin = doc_min.floor(); + let doc_size = doc_max - snapped_doc_origin; + + // Document render transform: no zoom, no rotation — only translation at DPI scale. + // This renders the document pixel-aligned at 100% scale. + let document_render_transform = glam::DAffine2::from_translation(-snapped_doc_origin); + let document_transform_vello = vello::kurbo::Affine::new((scale_transform * document_render_transform).to_cols_array()); + + let extra = scale.ceil() as u32 + 2; + let document_resolution = UVec2::new((doc_size.x * scale).ceil() as u32 + extra, (doc_size.y * scale).ceil() as u32 + extra); + + let mut scene = vello::Scene::new(); + scene.append(child, Some(document_transform_vello)); + + // Same infinite-transform fix as in the normal render path (see comment there), but sized to + // the document-resolution render target instead of the full viewport resolution. + let scaled_infinite_transform = vello::kurbo::Affine::scale_non_uniform(document_resolution.x as f64, document_resolution.y as f64); + for transform in scene.encoding_mut().transforms.iter_mut() { + if transform.matrix[0] == f32::INFINITY { + *transform = vello_encoding::Transform::from_kurbo(&scaled_infinite_transform); + } + } + + let document_texture = exec + .render_vello_scene_to_texture(&scene, document_resolution, context, background) + .await + .expect("Failed to render Vello scene for pixel preview"); + + // Map output viewport pixels to document texture coordinates. + // source_transform = footprint.inverse().matrix2 (encodes rotation and 1/zoom). + // source_offset = DPI * (viewport_origin_in_doc_space - snapped_doc_origin). + // For each output pixel p: tex_coord = source_transform * p + source_offset. 
+ let source_transform = glam::Mat2::from_cols(inv.matrix2.x_axis.as_vec2(), inv.matrix2.y_axis.as_vec2()); + let source_offset = ((inv.translation - snapped_doc_origin) * scale).as_vec2(); + let texture = exec + .resample_texture(&document_texture, physical_resolution, source_transform, source_offset, wgpu::FilterMode::Nearest) + .expect("Failed to upscale pixel preview texture"); + + return RenderOutput { + data: RenderOutputType::Texture(ImageTexture { texture }), + metadata, + }; + } + } + let texture = exec .render_vello_scene_to_texture(&scene, physical_resolution, context, background) .await