nora 2023-04-18 15:38:14 +02:00
parent 12163d1338
commit 550b1644cb
363 changed files with 84081 additions and 16 deletions


@@ -0,0 +1,40 @@
# Changelog for egui-wgpu
All notable changes to the `egui-wgpu` integration will be noted in this file.
## Unreleased
* Add `read_screen_rgba` to the egui-wgpu `Painter`, to allow for capturing the current frame when using wgpu. Used in conjunction with `Frame::request_screenshot`. ([#2676](https://github.com/emilk/egui/pull/2676))
* Improve performance of `update_buffers` ([#2820](https://github.com/emilk/egui/pull/2820))
* Added support for multisampling (MSAA) ([#2878](https://github.com/emilk/egui/pull/2878))
## 0.21.0 - 2023-02-08
* Update to `wgpu` 0.15 ([#2629](https://github.com/emilk/egui/pull/2629))
* Return `Err` instead of panic if we can't find a device ([#2428](https://github.com/emilk/egui/pull/2428)).
* `winit::Painter::set_window` is now `async` ([#2434](https://github.com/emilk/egui/pull/2434)).
* `egui-wgpu` now only depends on `epaint` instead of the entire `egui` ([#2438](https://github.com/emilk/egui/pull/2438)).
* `winit::Painter` now supports transparent backbuffer ([#2684](https://github.com/emilk/egui/pull/2684)).
## 0.20.0 - 2022-12-08 - web support
* Renamed `RenderPass` to `Renderer`.
* Renamed `RenderPass::execute` to `RenderPass::render`.
* Renamed `RenderPass::execute_with_renderpass` to `Renderer::render` (replacing existing `Renderer::render`)
* Reexported `Renderer`.
* You can now use `egui-wgpu` on web, using WebGL ([#2107](https://github.com/emilk/egui/pull/2107)).
* `Renderer` no longer handles pass creation and depth buffer creation ([#2136](https://github.com/emilk/egui/pull/2136))
* `PrepareCallback` now passes `wgpu::CommandEncoder` ([#2136](https://github.com/emilk/egui/pull/2136))
* `PrepareCallback` can now return `wgpu::CommandBuffer`s that are bundled into a single `wgpu::Queue::submit` call ([#2230](https://github.com/emilk/egui/pull/2230))
* Only a single vertex & index buffer is now created and resized when necessary (previously, vertex/index buffers were allocated for every mesh) ([#2148](https://github.com/emilk/egui/pull/2148)).
* `Renderer::update_texture` no longer creates a new `wgpu::Sampler` with every new texture ([#2198](https://github.com/emilk/egui/pull/2198))
* `Painter`'s instance/device/adapter/surface creation is now configurable via `WgpuConfiguration` ([#2207](https://github.com/emilk/egui/pull/2207))
* Fix panic on using a depth buffer ([#2316](https://github.com/emilk/egui/pull/2316))
## 0.19.0 - 2022-08-20
* Enables deferred render + surface state initialization for Android ([#1634](https://github.com/emilk/egui/pull/1634)).
* Make `RenderPass` `Send` and `Sync` ([#1883](https://github.com/emilk/egui/pull/1883)).
## 0.18.0 - 2022-05-15
First published version since moving the code into the `egui` repository from <https://github.com/LU15W1R7H/eww>.


@@ -0,0 +1,56 @@
[package]
name = "egui-wgpu"
version = "0.21.0"
description = "Bindings for using egui natively using the wgpu library"
authors = [
"Nils Hasenbanck <nils@hasenbanck.de>",
"embotech <opensource@embotech.com>",
"Emil Ernerfeldt <emil.ernerfeldt@gmail.com>",
]
edition = "2021"
rust-version = "1.65"
homepage = "https://github.com/emilk/egui/tree/master/crates/egui-wgpu"
license = "MIT OR Apache-2.0"
readme = "README.md"
repository = "https://github.com/emilk/egui/tree/master/crates/egui-wgpu"
categories = ["gui", "game-development"]
keywords = ["wgpu", "egui", "gui", "gamedev"]
include = [
"../LICENSE-APACHE",
"../LICENSE-MIT",
"**/*.rs",
"**/*.wgsl",
"Cargo.toml",
]
[package.metadata.docs.rs]
all-features = true
[features]
## Enable profiling with the [`puffin`](https://docs.rs/puffin) crate.
puffin = ["dep:puffin"]
## Enable [`winit`](https://docs.rs/winit) integration.
winit = ["dep:winit"]
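# Example (illustrative snippet): a dependent crate could enable these features with
#   egui-wgpu = { version = "0.21", features = ["winit", "puffin"] }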
[dependencies]
epaint = { version = "0.21.0", path = "../epaint", default-features = false, features = [
"bytemuck",
] }
bytemuck = "1.7"
tracing = { version = "0.1", default-features = false, features = ["std"] }
type-map = "0.5.0"
wgpu = "0.15.0"
#! ### Optional dependencies
## Enable this when generating docs.
document-features = { version = "0.2", optional = true }
winit = { version = "0.28", optional = true }
# Native:
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
puffin = { version = "0.14", optional = true }


@@ -0,0 +1,10 @@
# egui-wgpu
[![Latest version](https://img.shields.io/crates/v/egui-wgpu.svg)](https://crates.io/crates/egui-wgpu)
[![Documentation](https://docs.rs/egui-wgpu/badge.svg)](https://docs.rs/egui-wgpu)
![MIT](https://img.shields.io/badge/license-MIT-blue.svg)
![Apache](https://img.shields.io/badge/license-Apache-blue.svg)
This crate provides bindings between [`egui`](https://github.com/emilk/egui) and [wgpu](https://crates.io/crates/wgpu).

It was originally hosted at <https://github.com/hasenbanck/egui_wgpu_backend>.
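## Example

If you are not using [`eframe`](https://github.com/emilk/egui/tree/master/crates/eframe) or the `winit` feature of this crate, the `Renderer` can be driven directly. The sketch below shows a typical frame; names such as `device`, `queue`, `encoder`, `render_pass_descriptor`, `paint_jobs` and `textures_delta` are assumed to come from your own wgpu and egui setup, so this is an illustration rather than a complete integration:

```rust
// Create the renderer once, matching your surface format (no depth buffer, no MSAA here):
let mut renderer = egui_wgpu::Renderer::new(&device, surface_format, None, 1);

// Each frame:
let screen_descriptor = egui_wgpu::renderer::ScreenDescriptor {
    size_in_pixels: [width_in_pixels, height_in_pixels],
    pixels_per_point,
};

// Upload changed egui textures and the vertex/index/uniform buffers:
for (id, image_delta) in &textures_delta.set {
    renderer.update_texture(&device, &queue, *id, image_delta);
}
let user_cmd_bufs =
    renderer.update_buffers(&device, &queue, &mut encoder, &paint_jobs, &screen_descriptor);

// Record egui's draw commands into a render pass targeting your surface texture:
{
    let mut render_pass = encoder.begin_render_pass(&render_pass_descriptor);
    renderer.render(&mut render_pass, &paint_jobs, &screen_descriptor);
}

// Submit the user command buffers together with the main one, then free unused textures:
queue.submit(user_cmd_bufs.into_iter().chain(std::iter::once(encoder.finish())));
for id in &textures_delta.free {
    renderer.free_texture(id);
}
```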


@@ -0,0 +1,91 @@
// Vertex shader bindings
struct VertexOutput {
@location(0) tex_coord: vec2<f32>,
@location(1) color: vec4<f32>, // gamma 0-1
@builtin(position) position: vec4<f32>,
};
struct Locals {
screen_size: vec2<f32>,
// Uniform buffers need to be at least 16 bytes in WebGL.
// See https://github.com/gfx-rs/wgpu/issues/2072
_padding: vec2<u32>,
};
@group(0) @binding(0) var<uniform> r_locals: Locals;
// 0-1 linear from 0-1 sRGB gamma
fn linear_from_gamma_rgb(srgb: vec3<f32>) -> vec3<f32> {
let cutoff = srgb < vec3<f32>(0.04045);
let lower = srgb / vec3<f32>(12.92);
let higher = pow((srgb + vec3<f32>(0.055)) / vec3<f32>(1.055), vec3<f32>(2.4));
return select(higher, lower, cutoff);
}
// 0-1 sRGB gamma from 0-1 linear
fn gamma_from_linear_rgb(rgb: vec3<f32>) -> vec3<f32> {
let cutoff = rgb < vec3<f32>(0.0031308);
let lower = rgb * vec3<f32>(12.92);
let higher = vec3<f32>(1.055) * pow(rgb, vec3<f32>(1.0 / 2.4)) - vec3<f32>(0.055);
return select(higher, lower, cutoff);
}
// 0-1 sRGBA gamma from 0-1 linear
fn gamma_from_linear_rgba(linear_rgba: vec4<f32>) -> vec4<f32> {
return vec4<f32>(gamma_from_linear_rgb(linear_rgba.rgb), linear_rgba.a);
}
// [u8; 4] SRGB as u32 -> [r, g, b, a] in 0-1
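// For example (sketch): color = 0x04030201u unpacks to vec4<f32>(1.0, 2.0, 3.0, 4.0) / 255.0,
// i.e. the red channel comes from the lowest byte.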
fn unpack_color(color: u32) -> vec4<f32> {
return vec4<f32>(
f32(color & 255u),
f32((color >> 8u) & 255u),
f32((color >> 16u) & 255u),
f32((color >> 24u) & 255u),
) / 255.0;
}
fn position_from_screen(screen_pos: vec2<f32>) -> vec4<f32> {
return vec4<f32>(
2.0 * screen_pos.x / r_locals.screen_size.x - 1.0,
1.0 - 2.0 * screen_pos.y / r_locals.screen_size.y,
0.0,
1.0,
);
}
@vertex
fn vs_main(
@location(0) a_pos: vec2<f32>,
@location(1) a_tex_coord: vec2<f32>,
@location(2) a_color: u32,
) -> VertexOutput {
var out: VertexOutput;
out.tex_coord = a_tex_coord;
out.color = unpack_color(a_color);
out.position = position_from_screen(a_pos);
return out;
}
// Fragment shader bindings
@group(1) @binding(0) var r_tex_color: texture_2d<f32>;
@group(1) @binding(1) var r_tex_sampler: sampler;
@fragment
fn fs_main_linear_framebuffer(in: VertexOutput) -> @location(0) vec4<f32> {
// We always have an sRGB aware texture at the moment.
let tex_linear = textureSample(r_tex_color, r_tex_sampler, in.tex_coord);
let tex_gamma = gamma_from_linear_rgba(tex_linear);
let out_color_gamma = in.color * tex_gamma;
return vec4<f32>(linear_from_gamma_rgb(out_color_gamma.rgb), out_color_gamma.a);
}
@fragment
fn fs_main_gamma_framebuffer(in: VertexOutput) -> @location(0) vec4<f32> {
// We always have an sRGB aware texture at the moment.
let tex_linear = textureSample(r_tex_color, r_tex_sampler, in.tex_coord);
let tex_gamma = gamma_from_linear_rgba(tex_linear);
let out_color_gamma = in.color * tex_gamma;
return out_color_gamma;
}


@@ -0,0 +1,155 @@
//! This crate provides bindings between [`egui`](https://github.com/emilk/egui) and [wgpu](https://crates.io/crates/wgpu).
//!
//! ## Feature flags
#![cfg_attr(feature = "document-features", doc = document_features::document_features!())]
//!
#![allow(unsafe_code)]
pub use wgpu;
/// Low-level painting of [`egui`](https://github.com/emilk/egui) on [`wgpu`].
pub mod renderer;
pub use renderer::CallbackFn;
pub use renderer::Renderer;
/// Module for painting [`egui`](https://github.com/emilk/egui) with [`wgpu`] on [`winit`].
#[cfg(feature = "winit")]
pub mod winit;
use std::sync::Arc;
use epaint::mutex::RwLock;
/// Access to the render state for egui.
#[derive(Clone)]
pub struct RenderState {
pub device: Arc<wgpu::Device>,
pub queue: Arc<wgpu::Queue>,
pub target_format: wgpu::TextureFormat,
pub renderer: Arc<RwLock<Renderer>>,
}
/// Specifies which action should be taken as a consequence of a [`wgpu::SurfaceError`].
pub enum SurfaceErrorAction {
/// Do nothing and skip the current frame.
SkipFrame,
/// Instructs egui to recreate the surface, then skip the current frame.
RecreateSurface,
}
/// Configuration for using wgpu with eframe or the egui-wgpu winit feature.
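///
/// # Example
///
/// A minimal sketch of overriding a single option while keeping the other defaults
/// (the chosen present mode here is purely illustrative):
///
/// ```
/// let _config = egui_wgpu::WgpuConfiguration {
///     present_mode: egui_wgpu::wgpu::PresentMode::AutoNoVsync,
///     ..Default::default()
/// };
/// ```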
#[derive(Clone)]
pub struct WgpuConfiguration {
/// Configuration passed on device request.
pub device_descriptor: wgpu::DeviceDescriptor<'static>,
/// Backends that should be supported (wgpu will pick one of these)
pub backends: wgpu::Backends,
/// Present mode used for the primary surface.
pub present_mode: wgpu::PresentMode,
/// Power preference for the adapter.
pub power_preference: wgpu::PowerPreference,
/// Callback for surface errors.
pub on_surface_error: Arc<dyn Fn(wgpu::SurfaceError) -> SurfaceErrorAction>,
pub depth_format: Option<wgpu::TextureFormat>,
}
impl Default for WgpuConfiguration {
fn default() -> Self {
Self {
device_descriptor: wgpu::DeviceDescriptor {
label: Some("egui wgpu device"),
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
},
backends: wgpu::Backends::PRIMARY | wgpu::Backends::GL,
present_mode: wgpu::PresentMode::AutoVsync,
power_preference: wgpu::PowerPreference::HighPerformance,
depth_format: None,
on_surface_error: Arc::new(|err| {
if err == wgpu::SurfaceError::Outdated {
// This error occurs when the app is minimized on Windows.
// Silently return here to prevent spamming the console with:
// "The underlying surface has changed, and therefore the swap chain must be updated"
} else {
tracing::warn!("Dropped frame with error: {err}");
}
SurfaceErrorAction::SkipFrame
}),
}
}
}
/// Find the framebuffer format that egui prefers
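///
/// For example (illustrative values), a non-sRGB format is picked over an sRGB one:
///
/// ```
/// use egui_wgpu::wgpu::TextureFormat;
/// let formats = [TextureFormat::Rgba8UnormSrgb, TextureFormat::Bgra8Unorm];
/// assert_eq!(
///     egui_wgpu::preferred_framebuffer_format(&formats),
///     TextureFormat::Bgra8Unorm
/// );
/// ```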
pub fn preferred_framebuffer_format(formats: &[wgpu::TextureFormat]) -> wgpu::TextureFormat {
for &format in formats {
if matches!(
format,
wgpu::TextureFormat::Rgba8Unorm | wgpu::TextureFormat::Bgra8Unorm
) {
return format;
}
}
formats[0] // take the first
}
// maybe use thiserror?
#[derive(Debug)]
pub enum WgpuError {
DeviceError(wgpu::RequestDeviceError),
SurfaceError(wgpu::CreateSurfaceError),
}
impl std::fmt::Display for WgpuError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Debug::fmt(self, f)
}
}
impl std::error::Error for WgpuError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
WgpuError::DeviceError(e) => e.source(),
WgpuError::SurfaceError(e) => e.source(),
}
}
}
impl From<wgpu::RequestDeviceError> for WgpuError {
fn from(e: wgpu::RequestDeviceError) -> Self {
Self::DeviceError(e)
}
}
impl From<wgpu::CreateSurfaceError> for WgpuError {
fn from(e: wgpu::CreateSurfaceError) -> Self {
Self::SurfaceError(e)
}
}
// ---------------------------------------------------------------------------
/// Profiling macro for feature "puffin"
macro_rules! profile_function {
($($arg: tt)*) => {
#[cfg(feature = "puffin")]
#[cfg(not(target_arch = "wasm32"))]
puffin::profile_function!($($arg)*);
};
}
pub(crate) use profile_function;
/// Profiling macro for feature "puffin"
macro_rules! profile_scope {
($($arg: tt)*) => {
#[cfg(feature = "puffin")]
#[cfg(not(target_arch = "wasm32"))]
puffin::profile_scope!($($arg)*);
};
}
pub(crate) use profile_scope;


@@ -0,0 +1,979 @@
#![allow(unsafe_code)]
use std::num::NonZeroU64;
use std::ops::Range;
use std::{borrow::Cow, collections::HashMap, num::NonZeroU32};
use type_map::concurrent::TypeMap;
use wgpu;
use wgpu::util::DeviceExt as _;
use epaint::{emath::NumExt, PaintCallbackInfo, Primitive, Vertex};
/// A callback function that can be used to compose an [`epaint::PaintCallback`] for custom WGPU
/// rendering.
///
/// The callback is composed of two functions: `prepare` and `paint`:
/// - `prepare` is called every frame before `paint`, and can use the passed-in
/// [`wgpu::Device`] and [`wgpu::Buffer`] to allocate or modify GPU resources such as buffers.
/// - `paint` is called after `prepare` and is given access to the [`wgpu::RenderPass`] so
/// that it can issue draw commands into the same [`wgpu::RenderPass`] that is used for
/// all other egui elements.
///
/// The final argument of both the `prepare` and `paint` callbacks is the
/// [`paint_callback_resources`][crate::renderer::Renderer::paint_callback_resources].
/// `paint_callback_resources` has the same lifetime as the egui render pass, so it can be used to
/// store buffers, pipelines, and other information that needs to be accessed during the render
/// pass.
///
/// # Example
///
/// See the [`custom3d_wgpu`](https://github.com/emilk/egui/blob/master/crates/egui_demo_app/src/apps/custom3d_wgpu.rs) demo source for a detailed usage example.
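///
/// A minimal sketch of constructing a callback (the closure bodies are illustrative,
/// not a definitive implementation):
///
/// ```ignore
/// let callback = egui_wgpu::CallbackFn::new()
///     .prepare(|device, queue, encoder, paint_callback_resources| {
///         // Allocate or update GPU resources here; return any extra command buffers.
///         Vec::new()
///     })
///     .paint(|info, render_pass, paint_callback_resources| {
///         // Issue draw commands into egui's render pass here.
///     });
/// ```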
pub struct CallbackFn {
prepare: Box<PrepareCallback>,
paint: Box<PaintCallback>,
}
type PrepareCallback = dyn Fn(
&wgpu::Device,
&wgpu::Queue,
&mut wgpu::CommandEncoder,
&mut TypeMap,
) -> Vec<wgpu::CommandBuffer>
+ Sync
+ Send;
type PaintCallback =
dyn for<'a, 'b> Fn(PaintCallbackInfo, &'a mut wgpu::RenderPass<'b>, &'b TypeMap) + Sync + Send;
impl Default for CallbackFn {
fn default() -> Self {
CallbackFn {
prepare: Box::new(|_, _, _, _| Vec::new()),
paint: Box::new(|_, _, _| ()),
}
}
}
impl CallbackFn {
pub fn new() -> Self {
Self::default()
}
/// Set the prepare callback.
///
/// The passed-in `CommandEncoder` is egui's and can be used directly to register
/// wgpu commands for simple use cases.
/// This allows reusing the same [`wgpu::CommandEncoder`] for all callbacks and egui
/// rendering itself.
///
/// For more complicated use cases, one can also return a list of arbitrary
/// `CommandBuffer`s and have complete control over how they get created and fed.
/// In particular, this gives an opportunity to parallelize command registration and
/// prevents a faulty callback from poisoning the main wgpu pipeline.
///
/// When using eframe, the main egui command buffer, as well as all user-defined
/// command buffers returned by this function, are guaranteed to all be submitted
/// at once in a single call.
pub fn prepare<F>(mut self, prepare: F) -> Self
where
F: Fn(
&wgpu::Device,
&wgpu::Queue,
&mut wgpu::CommandEncoder,
&mut TypeMap,
) -> Vec<wgpu::CommandBuffer>
+ Sync
+ Send
+ 'static,
{
self.prepare = Box::new(prepare) as _;
self
}
/// Set the paint callback
pub fn paint<F>(mut self, paint: F) -> Self
where
F: for<'a, 'b> Fn(PaintCallbackInfo, &'a mut wgpu::RenderPass<'b>, &'b TypeMap)
+ Sync
+ Send
+ 'static,
{
self.paint = Box::new(paint) as _;
self
}
}
/// Information about the screen used for rendering.
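///
/// A sketch with illustrative values:
///
/// ```
/// let _screen = egui_wgpu::renderer::ScreenDescriptor {
///     size_in_pixels: [1920, 1080],
///     pixels_per_point: 2.0,
/// };
/// ```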
pub struct ScreenDescriptor {
/// Size of the window in physical pixels.
pub size_in_pixels: [u32; 2],
/// HiDPI scale factor (pixels per point).
pub pixels_per_point: f32,
}
impl ScreenDescriptor {
/// size in "logical" points
fn screen_size_in_points(&self) -> [f32; 2] {
[
self.size_in_pixels[0] as f32 / self.pixels_per_point,
self.size_in_pixels[1] as f32 / self.pixels_per_point,
]
}
}
/// Uniform buffer used when rendering.
#[derive(Clone, Copy, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
struct UniformBuffer {
screen_size_in_points: [f32; 2],
// Uniform buffers need to be at least 16 bytes in WebGL.
// See https://github.com/gfx-rs/wgpu/issues/2072
_padding: [u32; 2],
}
impl PartialEq for UniformBuffer {
fn eq(&self, other: &Self) -> bool {
self.screen_size_in_points == other.screen_size_in_points
}
}
struct SlicedBuffer {
buffer: wgpu::Buffer,
slices: Vec<Range<usize>>,
capacity: wgpu::BufferAddress,
}
/// Renderer for an egui-based GUI.
pub struct Renderer {
pipeline: wgpu::RenderPipeline,
index_buffer: SlicedBuffer,
vertex_buffer: SlicedBuffer,
uniform_buffer: wgpu::Buffer,
previous_uniform_buffer_content: UniformBuffer,
uniform_bind_group: wgpu::BindGroup,
texture_bind_group_layout: wgpu::BindGroupLayout,
/// Map of egui texture IDs to textures and their associated bindgroups (texture view +
/// sampler). The texture may be None if the TextureId is just a handle to a user-provided
/// sampler.
textures: HashMap<epaint::TextureId, (Option<wgpu::Texture>, wgpu::BindGroup)>,
next_user_texture_id: u64,
samplers: HashMap<epaint::textures::TextureOptions, wgpu::Sampler>,
/// Storage for use by [`epaint::PaintCallback`]'s that need to store resources such as render
/// pipelines that must have the lifetime of the renderpass.
pub paint_callback_resources: TypeMap,
}
impl Renderer {
/// Creates a renderer for an egui UI.
///
/// `output_color_format` should preferably be [`wgpu::TextureFormat::Rgba8Unorm`] or
/// [`wgpu::TextureFormat::Bgra8Unorm`], i.e. in gamma-space.
pub fn new(
device: &wgpu::Device,
output_color_format: wgpu::TextureFormat,
output_depth_format: Option<wgpu::TextureFormat>,
msaa_samples: u32,
) -> Self {
crate::profile_function!();
let shader = wgpu::ShaderModuleDescriptor {
label: Some("egui"),
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("egui.wgsl"))),
};
let module = device.create_shader_module(shader);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("egui_uniform_buffer"),
contents: bytemuck::cast_slice(&[UniformBuffer {
screen_size_in_points: [0.0, 0.0],
_padding: Default::default(),
}]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("egui_uniform_bind_group_layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: NonZeroU64::new(std::mem::size_of::<UniformBuffer>() as _),
ty: wgpu::BufferBindingType::Uniform,
},
count: None,
}],
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("egui_uniform_bind_group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: &uniform_buffer,
offset: 0,
size: None,
}),
}],
});
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("egui_texture_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("egui_pipeline_layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &texture_bind_group_layout],
push_constant_ranges: &[],
});
let depth_stencil = output_depth_format.map(|format| wgpu::DepthStencilState {
format,
depth_write_enabled: false,
depth_compare: wgpu::CompareFunction::Always,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("egui_pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
entry_point: "vs_main",
module: &module,
buffers: &[wgpu::VertexBufferLayout {
array_stride: 5 * 4,
step_mode: wgpu::VertexStepMode::Vertex,
// 0: vec2 position
// 1: vec2 texture coordinates
// 2: uint color
attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2, 2 => Uint32],
}],
},
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
unclipped_depth: false,
conservative: false,
cull_mode: None,
front_face: wgpu::FrontFace::default(),
polygon_mode: wgpu::PolygonMode::default(),
strip_index_format: None,
},
depth_stencil,
multisample: wgpu::MultisampleState {
alpha_to_coverage_enabled: false,
count: msaa_samples,
mask: !0,
},
fragment: Some(wgpu::FragmentState {
module: &module,
entry_point: if output_color_format.describe().srgb {
tracing::warn!("Detected a linear (sRGBA aware) framebuffer {:?}. egui prefers Rgba8Unorm or Bgra8Unorm", output_color_format);
"fs_main_linear_framebuffer"
} else {
"fs_main_gamma_framebuffer" // this is what we prefer
},
targets: &[Some(wgpu::ColorTargetState {
format: output_color_format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::OneMinusDstAlpha,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
});
const VERTEX_BUFFER_START_CAPACITY: wgpu::BufferAddress =
(std::mem::size_of::<Vertex>() * 1024) as _;
const INDEX_BUFFER_START_CAPACITY: wgpu::BufferAddress =
(std::mem::size_of::<u32>() * 1024 * 3) as _;
Self {
pipeline,
vertex_buffer: SlicedBuffer {
buffer: create_vertex_buffer(device, VERTEX_BUFFER_START_CAPACITY),
slices: Vec::with_capacity(64),
capacity: VERTEX_BUFFER_START_CAPACITY,
},
index_buffer: SlicedBuffer {
buffer: create_index_buffer(device, INDEX_BUFFER_START_CAPACITY),
slices: Vec::with_capacity(64),
capacity: INDEX_BUFFER_START_CAPACITY,
},
uniform_buffer,
// Buffers on wgpu are zero initialized, so this is indeed its current state!
previous_uniform_buffer_content: UniformBuffer {
screen_size_in_points: [0.0, 0.0],
_padding: [0, 0],
},
uniform_bind_group,
texture_bind_group_layout,
textures: HashMap::new(),
next_user_texture_id: 0,
samplers: HashMap::new(),
paint_callback_resources: TypeMap::default(),
}
}
/// Executes the egui renderer onto an existing wgpu renderpass.
pub fn render<'rp>(
&'rp self,
render_pass: &mut wgpu::RenderPass<'rp>,
paint_jobs: &[epaint::ClippedPrimitive],
screen_descriptor: &ScreenDescriptor,
) {
crate::profile_function!();
let pixels_per_point = screen_descriptor.pixels_per_point;
let size_in_pixels = screen_descriptor.size_in_pixels;
// Whether or not we need to reset the render pass because a paint callback has just
// run.
let mut needs_reset = true;
let mut index_buffer_slices = self.index_buffer.slices.iter();
let mut vertex_buffer_slices = self.vertex_buffer.slices.iter();
for epaint::ClippedPrimitive {
clip_rect,
primitive,
} in paint_jobs
{
if needs_reset {
render_pass.set_viewport(
0.0,
0.0,
size_in_pixels[0] as f32,
size_in_pixels[1] as f32,
0.0,
1.0,
);
render_pass.set_pipeline(&self.pipeline);
render_pass.set_bind_group(0, &self.uniform_bind_group, &[]);
needs_reset = false;
}
{
let rect = ScissorRect::new(clip_rect, pixels_per_point, size_in_pixels);
if rect.width == 0 || rect.height == 0 {
// Skip rendering zero-sized clip areas.
if let Primitive::Mesh(_) = primitive {
// If this is a mesh, we need to advance the index and vertex buffer iterators:
index_buffer_slices.next().unwrap();
vertex_buffer_slices.next().unwrap();
}
continue;
}
render_pass.set_scissor_rect(rect.x, rect.y, rect.width, rect.height);
}
match primitive {
Primitive::Mesh(mesh) => {
let index_buffer_slice = index_buffer_slices.next().unwrap();
let vertex_buffer_slice = vertex_buffer_slices.next().unwrap();
if let Some((_texture, bind_group)) = self.textures.get(&mesh.texture_id) {
render_pass.set_bind_group(1, bind_group, &[]);
render_pass.set_index_buffer(
self.index_buffer.buffer.slice(
index_buffer_slice.start as u64..index_buffer_slice.end as u64,
),
wgpu::IndexFormat::Uint32,
);
render_pass.set_vertex_buffer(
0,
self.vertex_buffer.buffer.slice(
vertex_buffer_slice.start as u64..vertex_buffer_slice.end as u64,
),
);
render_pass.draw_indexed(0..mesh.indices.len() as u32, 0, 0..1);
} else {
tracing::warn!("Missing texture: {:?}", mesh.texture_id);
}
}
Primitive::Callback(callback) => {
let cbfn = if let Some(c) = callback.callback.downcast_ref::<CallbackFn>() {
c
} else {
// We already warned in the `prepare` callback
continue;
};
if callback.rect.is_positive() {
crate::profile_scope!("callback");
needs_reset = true;
{
// We're setting a default viewport for the render pass as a
// courtesy for the user, so that they don't have to think about
// it in the simple case where they just want to fill the whole
// paint area.
//
// The user still has the possibility of setting their own custom
// viewport during the paint callback, effectively overriding this
// one.
let min = (callback.rect.min.to_vec2() * pixels_per_point).round();
let max = (callback.rect.max.to_vec2() * pixels_per_point).round();
render_pass.set_viewport(
min.x,
min.y,
max.x - min.x,
max.y - min.y,
0.0,
1.0,
);
}
(cbfn.paint)(
PaintCallbackInfo {
viewport: callback.rect,
clip_rect: *clip_rect,
pixels_per_point,
screen_size_px: size_in_pixels,
},
render_pass,
&self.paint_callback_resources,
);
}
}
}
}
render_pass.set_scissor_rect(0, 0, size_in_pixels[0], size_in_pixels[1]);
}
/// Should be called before `render()`.
pub fn update_texture(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
id: epaint::TextureId,
image_delta: &epaint::ImageDelta,
) {
crate::profile_function!();
let width = image_delta.image.width() as u32;
let height = image_delta.image.height() as u32;
let size = wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
};
let data_color32 = match &image_delta.image {
epaint::ImageData::Color(image) => {
assert_eq!(
width as usize * height as usize,
image.pixels.len(),
"Mismatch between texture size and texel count"
);
Cow::Borrowed(&image.pixels)
}
epaint::ImageData::Font(image) => {
assert_eq!(
width as usize * height as usize,
image.pixels.len(),
"Mismatch between texture size and texel count"
);
Cow::Owned(image.srgba_pixels(None).collect::<Vec<_>>())
}
};
let data_bytes: &[u8] = bytemuck::cast_slice(data_color32.as_slice());
let queue_write_data_to_texture = |texture, origin| {
queue.write_texture(
wgpu::ImageCopyTexture {
texture,
mip_level: 0,
origin,
aspect: wgpu::TextureAspect::All,
},
data_bytes,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: NonZeroU32::new(4 * width),
rows_per_image: NonZeroU32::new(height),
},
size,
);
};
if let Some(pos) = image_delta.pos {
// update the existing texture
let (texture, _bind_group) = self
.textures
.get(&id)
.expect("Tried to update a texture that has not been allocated yet.");
let origin = wgpu::Origin3d {
x: pos[0] as u32,
y: pos[1] as u32,
z: 0,
};
queue_write_data_to_texture(
texture.as_ref().expect("Tried to update user texture."),
origin,
);
} else {
// allocate a new texture
// Use same label for all resources associated with this texture id (no point in retyping the type)
let label_str = format!("egui_texid_{:?}", id);
let label = Some(label_str.as_str());
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb, // Minspec for wgpu WebGL emulation is WebGL2, so this should always be supported.
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[wgpu::TextureFormat::Rgba8UnormSrgb],
});
let sampler = self
.samplers
.entry(image_delta.options)
.or_insert_with(|| create_sampler(image_delta.options, device));
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label,
layout: &self.texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(
&texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(sampler),
},
],
});
let origin = wgpu::Origin3d::ZERO;
queue_write_data_to_texture(&texture, origin);
self.textures.insert(id, (Some(texture), bind_group));
};
}
pub fn free_texture(&mut self, id: &epaint::TextureId) {
self.textures.remove(id);
}
/// Get the WGPU texture and bind group associated with a texture that has been allocated by egui.
///
/// This could be used by custom paint hooks to render images that have been added with
/// [`egui_extras::RetainedImage`](https://docs.rs/egui_extras/latest/egui_extras/image/struct.RetainedImage.html)
/// or [`epaint::Context::load_texture`](https://docs.rs/egui/latest/egui/struct.Context.html#method.load_texture).
pub fn texture(
&self,
id: &epaint::TextureId,
) -> Option<&(Option<wgpu::Texture>, wgpu::BindGroup)> {
self.textures.get(id)
}
/// Registers a `wgpu::Texture` with an `epaint::TextureId`.
///
/// This enables the application to reference the texture inside an image ui element.
/// This effectively enables off-screen rendering inside the egui UI.
/// The texture must have the format `TextureFormat::Rgba8UnormSrgb` and the
/// usage `TextureUsage::SAMPLED`.
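///
/// A sketch of typical use, assuming `renderer` is this [`Renderer`] and `my_texture`
/// is an illustrative, user-created texture:
///
/// ```ignore
/// let texture_id = renderer.register_native_texture(
///     &device,
///     &my_texture.create_view(&wgpu::TextureViewDescriptor::default()),
///     wgpu::FilterMode::Linear,
/// );
/// // `texture_id` can now be used to show the texture in an egui image element.
/// ```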
pub fn register_native_texture(
&mut self,
device: &wgpu::Device,
texture: &wgpu::TextureView,
texture_filter: wgpu::FilterMode,
) -> epaint::TextureId {
self.register_native_texture_with_sampler_options(
device,
texture,
wgpu::SamplerDescriptor {
label: Some(format!("egui_user_image_{}", self.next_user_texture_id).as_str()),
mag_filter: texture_filter,
min_filter: texture_filter,
..Default::default()
},
)
}
/// Registers a `wgpu::Texture` with an existing `epaint::TextureId`.
///
/// This enables applications to reuse `TextureId`s.
pub fn update_egui_texture_from_wgpu_texture(
&mut self,
device: &wgpu::Device,
texture: &wgpu::TextureView,
texture_filter: wgpu::FilterMode,
id: epaint::TextureId,
) {
self.update_egui_texture_from_wgpu_texture_with_sampler_options(
device,
texture,
wgpu::SamplerDescriptor {
label: Some(format!("egui_user_image_{}", self.next_user_texture_id).as_str()),
mag_filter: texture_filter,
min_filter: texture_filter,
..Default::default()
},
id,
);
}
/// Registers a `wgpu::Texture` with an `epaint::TextureId` while also accepting custom
/// `wgpu::SamplerDescriptor` options.
///
/// This allows applications to specify individual minification/magnification filters as well as
/// custom mipmap and tiling options.
///
/// The `Texture` must have the format `TextureFormat::Rgba8UnormSrgb` and usage
/// `TextureUsage::SAMPLED`. Any compare function supplied in the `SamplerDescriptor` will be
/// ignored.
#[allow(clippy::needless_pass_by_value)] // false positive
pub fn register_native_texture_with_sampler_options(
&mut self,
device: &wgpu::Device,
texture: &wgpu::TextureView,
sampler_descriptor: wgpu::SamplerDescriptor<'_>,
) -> epaint::TextureId {
crate::profile_function!();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
compare: None,
..sampler_descriptor
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some(format!("egui_user_image_{}", self.next_user_texture_id).as_str()),
layout: &self.texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(texture),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
});
let id = epaint::TextureId::User(self.next_user_texture_id);
self.textures.insert(id, (None, bind_group));
self.next_user_texture_id += 1;
id
}
/// Registers a `wgpu::Texture` with an existing `epaint::TextureId` while also accepting custom
/// `wgpu::SamplerDescriptor` options.
///
/// This allows applications to reuse `TextureId`s created with custom sampler options.
#[allow(clippy::needless_pass_by_value)] // false positive
pub fn update_egui_texture_from_wgpu_texture_with_sampler_options(
&mut self,
device: &wgpu::Device,
texture: &wgpu::TextureView,
sampler_descriptor: wgpu::SamplerDescriptor<'_>,
id: epaint::TextureId,
) {
crate::profile_function!();
let (_user_texture, user_texture_binding) = self
.textures
.get_mut(&id)
.expect("Tried to update a texture that has not been allocated yet.");
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
compare: None,
..sampler_descriptor
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some(format!("egui_user_image_{}", self.next_user_texture_id).as_str()),
layout: &self.texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(texture),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
});
*user_texture_binding = bind_group;
}
/// Uploads the uniform, vertex and index data used by the renderer.
/// Should be called before `render()`.
///
/// Returns all user-defined command buffers gathered from prepare callbacks.
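///
/// A sketch of a typical frame, assuming `renderer`, `device`, `queue`, `encoder`,
/// `paint_jobs`, `render_pass_descriptor` and `screen_descriptor` already exist
/// (illustrative only):
///
/// ```ignore
/// let user_cmd_bufs =
///     renderer.update_buffers(&device, &queue, &mut encoder, &paint_jobs, &screen_descriptor);
/// {
///     let mut render_pass = encoder.begin_render_pass(&render_pass_descriptor);
///     renderer.render(&mut render_pass, &paint_jobs, &screen_descriptor);
/// }
/// // Submit the user command buffers together with the main one:
/// queue.submit(user_cmd_bufs.into_iter().chain(std::iter::once(encoder.finish())));
/// ```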
pub fn update_buffers(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
encoder: &mut wgpu::CommandEncoder,
paint_jobs: &[epaint::ClippedPrimitive],
screen_descriptor: &ScreenDescriptor,
) -> Vec<wgpu::CommandBuffer> {
crate::profile_function!();
let screen_size_in_points = screen_descriptor.screen_size_in_points();
let uniform_buffer_content = UniformBuffer {
screen_size_in_points,
_padding: Default::default(),
};
if uniform_buffer_content != self.previous_uniform_buffer_content {
crate::profile_scope!("update uniforms");
queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[uniform_buffer_content]),
);
self.previous_uniform_buffer_content = uniform_buffer_content;
}
// Determine how many vertices & indices need to be rendered.
let (vertex_count, index_count) = {
crate::profile_scope!("count_vertices_indices");
paint_jobs.iter().fold((0, 0), |acc, clipped_primitive| {
match &clipped_primitive.primitive {
Primitive::Mesh(mesh) => {
(acc.0 + mesh.vertices.len(), acc.1 + mesh.indices.len())
}
Primitive::Callback(_) => acc,
}
})
};
if index_count > 0 {
crate::profile_scope!("indices");
self.index_buffer.slices.clear();
let required_index_buffer_size = (std::mem::size_of::<u32>() * index_count) as u64;
if self.index_buffer.capacity < required_index_buffer_size {
// Resize index buffer if needed.
self.index_buffer.capacity =
(self.index_buffer.capacity * 2).at_least(required_index_buffer_size);
self.index_buffer.buffer = create_index_buffer(device, self.index_buffer.capacity);
}
let mut index_buffer_staging = queue
.write_buffer_with(
&self.index_buffer.buffer,
0,
NonZeroU64::new(required_index_buffer_size).unwrap(),
)
.expect("Failed to create staging buffer for index data");
let mut index_offset = 0;
for epaint::ClippedPrimitive { primitive, .. } in paint_jobs.iter() {
match primitive {
Primitive::Mesh(mesh) => {
let size = mesh.indices.len() * std::mem::size_of::<u32>();
let slice = index_offset..(size + index_offset);
index_buffer_staging[slice.clone()]
.copy_from_slice(bytemuck::cast_slice(&mesh.indices));
self.index_buffer.slices.push(slice);
index_offset += size;
}
Primitive::Callback(_) => {}
}
}
}
if vertex_count > 0 {
crate::profile_scope!("vertices");
self.vertex_buffer.slices.clear();
let required_vertex_buffer_size = (std::mem::size_of::<Vertex>() * vertex_count) as u64;
if self.vertex_buffer.capacity < required_vertex_buffer_size {
// Resize vertex buffer if needed.
self.vertex_buffer.capacity =
(self.vertex_buffer.capacity * 2).at_least(required_vertex_buffer_size);
self.vertex_buffer.buffer =
create_vertex_buffer(device, self.vertex_buffer.capacity);
}
let mut vertex_buffer_staging = queue
.write_buffer_with(
&self.vertex_buffer.buffer,
0,
NonZeroU64::new(required_vertex_buffer_size).unwrap(),
)
.expect("Failed to create staging buffer for vertex data");
let mut vertex_offset = 0;
for epaint::ClippedPrimitive { primitive, .. } in paint_jobs.iter() {
match primitive {
Primitive::Mesh(mesh) => {
let size = mesh.vertices.len() * std::mem::size_of::<Vertex>();
let slice = vertex_offset..(size + vertex_offset);
vertex_buffer_staging[slice.clone()]
.copy_from_slice(bytemuck::cast_slice(&mesh.vertices));
self.vertex_buffer.slices.push(slice);
vertex_offset += size;
}
Primitive::Callback(_) => {}
}
}
}
{
crate::profile_scope!("user command buffers");
let mut user_cmd_bufs = Vec::new(); // collect user command buffers
for epaint::ClippedPrimitive { primitive, .. } in paint_jobs.iter() {
match primitive {
Primitive::Mesh(_) => {}
Primitive::Callback(callback) => {
let cbfn = if let Some(c) = callback.callback.downcast_ref::<CallbackFn>() {
c
} else {
tracing::warn!(
"Unknown paint callback: expected `egui_wgpu::CallbackFn`"
);
continue;
};
crate::profile_scope!("callback");
user_cmd_bufs.extend((cbfn.prepare)(
device,
queue,
encoder,
&mut self.paint_callback_resources,
));
}
}
}
user_cmd_bufs
}
}
}
fn create_sampler(
options: epaint::textures::TextureOptions,
device: &wgpu::Device,
) -> wgpu::Sampler {
let mag_filter = match options.magnification {
epaint::textures::TextureFilter::Nearest => wgpu::FilterMode::Nearest,
epaint::textures::TextureFilter::Linear => wgpu::FilterMode::Linear,
};
let min_filter = match options.minification {
epaint::textures::TextureFilter::Nearest => wgpu::FilterMode::Nearest,
epaint::textures::TextureFilter::Linear => wgpu::FilterMode::Linear,
};
device.create_sampler(&wgpu::SamplerDescriptor {
label: Some(&format!(
"egui sampler (mag: {:?}, min {:?})",
mag_filter, min_filter
)),
mag_filter,
min_filter,
..Default::default()
})
}
fn create_vertex_buffer(device: &wgpu::Device, size: u64) -> wgpu::Buffer {
crate::profile_function!();
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("egui_vertex_buffer"),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
size,
mapped_at_creation: false,
})
}
fn create_index_buffer(device: &wgpu::Device, size: u64) -> wgpu::Buffer {
crate::profile_function!();
device.create_buffer(&wgpu::BufferDescriptor {
label: Some("egui_index_buffer"),
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
size,
mapped_at_creation: false,
})
}
/// A Rect in physical pixel space, used for setting clipping rectangles.
struct ScissorRect {
x: u32,
y: u32,
width: u32,
height: u32,
}
impl ScissorRect {
fn new(clip_rect: &epaint::Rect, pixels_per_point: f32, target_size: [u32; 2]) -> Self {
// Transform clip rect to physical pixels:
let clip_min_x = pixels_per_point * clip_rect.min.x;
let clip_min_y = pixels_per_point * clip_rect.min.y;
let clip_max_x = pixels_per_point * clip_rect.max.x;
let clip_max_y = pixels_per_point * clip_rect.max.y;
// Round to integer:
let clip_min_x = clip_min_x.round() as u32;
let clip_min_y = clip_min_y.round() as u32;
let clip_max_x = clip_max_x.round() as u32;
let clip_max_y = clip_max_y.round() as u32;
// Clamp:
let clip_min_x = clip_min_x.clamp(0, target_size[0]);
let clip_min_y = clip_min_y.clamp(0, target_size[1]);
let clip_max_x = clip_max_x.clamp(clip_min_x, target_size[0]);
let clip_max_y = clip_max_y.clamp(clip_min_y, target_size[1]);
Self {
x: clip_min_x,
y: clip_min_y,
width: clip_max_x - clip_min_x,
height: clip_max_y - clip_min_y,
}
}
}
#[test]
fn renderer_impl_send_sync() {
fn assert_send_sync<T: Send + Sync>() {}
assert_send_sync::<Renderer>();
}


@@ -0,0 +1,630 @@
use std::sync::Arc;
use epaint::{self, mutex::RwLock};
use tracing::error;
use crate::{renderer, RenderState, Renderer, SurfaceErrorAction, WgpuConfiguration};
struct SurfaceState {
surface: wgpu::Surface,
alpha_mode: wgpu::CompositeAlphaMode,
width: u32,
height: u32,
}
/// A texture and a buffer for reading the rendered frame back to the cpu.
/// The texture is required since [`wgpu::TextureUsages::COPY_SRC`] is not an allowed
/// flag for the surface texture on all platforms. This means that anytime we want to
/// capture the frame, we first render it to this texture, and then we can copy it to
/// both the surface texture and the buffer, from where we can pull it back to the cpu.
struct CaptureState {
texture: wgpu::Texture,
buffer: wgpu::Buffer,
padding: BufferPadding,
}
impl CaptureState {
fn new(device: &Arc<wgpu::Device>, surface_texture: &wgpu::Texture) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("egui_screen_capture_texture"),
size: surface_texture.size(),
mip_level_count: surface_texture.mip_level_count(),
sample_count: surface_texture.sample_count(),
dimension: surface_texture.dimension(),
format: surface_texture.format(),
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let padding = BufferPadding::new(surface_texture.width());
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("egui_screen_capture_buffer"),
size: (padding.padded_bytes_per_row * texture.height()) as u64,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
Self {
texture,
buffer,
padding,
}
}
}
struct BufferPadding {
unpadded_bytes_per_row: u32,
padded_bytes_per_row: u32,
}
impl BufferPadding {
fn new(width: u32) -> Self {
let bytes_per_pixel = std::mem::size_of::<u32>() as u32;
let unpadded_bytes_per_row = width * bytes_per_pixel;
let padded_bytes_per_row =
wgpu::util::align_to(unpadded_bytes_per_row, wgpu::COPY_BYTES_PER_ROW_ALIGNMENT);
Self {
unpadded_bytes_per_row,
padded_bytes_per_row,
}
}
}
/// Everything you need to paint egui with [`wgpu`] on [`winit`].
///
/// Alternatively you can use [`crate::renderer`] directly.
pub struct Painter {
configuration: WgpuConfiguration,
msaa_samples: u32,
support_transparent_backbuffer: bool,
depth_format: Option<wgpu::TextureFormat>,
depth_texture_view: Option<wgpu::TextureView>,
msaa_texture_view: Option<wgpu::TextureView>,
screen_capture_state: Option<CaptureState>,
instance: wgpu::Instance,
adapter: Option<wgpu::Adapter>,
render_state: Option<RenderState>,
surface_state: Option<SurfaceState>,
}
impl Painter {
/// Manages [`wgpu`] state, including surface state, required to render egui.
///
/// Only the [`wgpu::Instance`] is initialized here. Device selection and the initialization
/// of render + surface state is deferred until the painter is given its first window target
/// via [`set_window()`](Self::set_window). (Ensuring that a device that's compatible with the
/// native window is chosen)
///
/// Before calling [`paint_and_update_textures()`](Self::paint_and_update_textures) a
/// [`wgpu::Surface`] must be initialized (and corresponding render state) by calling
/// [`set_window()`](Self::set_window) once you have
/// a [`winit::window::Window`] with a valid `.raw_window_handle()`
/// associated.
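///
/// A sketch of the expected call sequence; `window` and the use of the `pollster`
/// crate to block on the async call are illustrative assumptions:
///
/// ```ignore
/// let mut painter = egui_wgpu::winit::Painter::new(
///     egui_wgpu::WgpuConfiguration::default(),
///     1,     // msaa_samples
///     0,     // depth_bits
///     false, // support_transparent_backbuffer
/// );
/// // Once a valid winit window exists:
/// pollster::block_on(unsafe { painter.set_window(Some(&window)) }).expect("wgpu setup failed");
/// ```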
pub fn new(
configuration: WgpuConfiguration,
msaa_samples: u32,
depth_bits: u8,
support_transparent_backbuffer: bool,
) -> Self {
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: configuration.backends,
dx12_shader_compiler: Default::default(), //
});
Self {
configuration,
msaa_samples,
support_transparent_backbuffer,
depth_format: (depth_bits > 0).then_some(wgpu::TextureFormat::Depth32Float),
depth_texture_view: None,
screen_capture_state: None,
instance,
adapter: None,
render_state: None,
surface_state: None,
msaa_texture_view: None,
}
}
/// Get the [`RenderState`].
///
/// Will return [`None`] if the render state has not been initialized yet.
pub fn render_state(&self) -> Option<RenderState> {
self.render_state.clone()
}
async fn init_render_state(
&self,
adapter: &wgpu::Adapter,
target_format: wgpu::TextureFormat,
) -> Result<RenderState, wgpu::RequestDeviceError> {
adapter
.request_device(&self.configuration.device_descriptor, None)
.await
.map(|(device, queue)| {
let renderer =
Renderer::new(&device, target_format, self.depth_format, self.msaa_samples);
RenderState {
device: Arc::new(device),
queue: Arc::new(queue),
target_format,
renderer: Arc::new(RwLock::new(renderer)),
}
})
}
// We want to defer the initialization of our render state until we have a surface
// so we can take its format into account.
//
// After we've initialized our render state once though we expect all future surfaces
// will have the same format and so this render state will remain valid.
async fn ensure_render_state_for_surface(
&mut self,
surface: &wgpu::Surface,
) -> Result<(), wgpu::RequestDeviceError> {
if self.adapter.is_none() {
self.adapter = self
.instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: self.configuration.power_preference,
compatible_surface: Some(surface),
force_fallback_adapter: false,
})
.await;
}
if self.render_state.is_none() {
match &self.adapter {
Some(adapter) => {
let swapchain_format = crate::preferred_framebuffer_format(
&surface.get_capabilities(adapter).formats,
);
let rs = self.init_render_state(adapter, swapchain_format).await?;
self.render_state = Some(rs);
}
None => return Err(wgpu::RequestDeviceError {}),
}
}
Ok(())
}
fn configure_surface(
surface_state: &SurfaceState,
render_state: &RenderState,
present_mode: wgpu::PresentMode,
) {
surface_state.surface.configure(
&render_state.device,
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_DST,
format: render_state.target_format,
width: surface_state.width,
height: surface_state.height,
present_mode,
alpha_mode: surface_state.alpha_mode,
view_formats: vec![render_state.target_format],
},
);
}
/// Updates (or clears) the [`winit::window::Window`] associated with the [`Painter`]
///
/// This creates a [`wgpu::Surface`] for the given Window (as well as initializing render
/// state if needed) that is used for egui rendering.
///
/// This must be called before trying to render via
/// [`paint_and_update_textures`](Self::paint_and_update_textures)
///
/// # Portability
///
/// _In particular it's important to note that on Android it's only possible to create
/// a window surface between `Resumed` and `Paused` lifecycle events, and Winit will panic on
/// attempts to query the raw window handle while paused._
///
/// On Android [`set_window`](Self::set_window) should be called with `Some(window)` for each
/// `Resumed` event and `None` for each `Paused` event. Currently, on all other platforms
/// [`set_window`](Self::set_window) may be called with `Some(window)` as soon as you have a
/// valid [`winit::window::Window`].
///
/// # Safety
///
/// The raw Window handle associated with the given `window` must be a valid object to create a
/// surface upon and must remain valid for the lifetime of the created surface. (The surface may
/// be cleared by passing `None`).
///
/// # Errors
/// If the provided wgpu configuration does not match an available device.
pub async unsafe fn set_window(
&mut self,
window: Option<&winit::window::Window>,
) -> Result<(), crate::WgpuError> {
match window {
Some(window) => {
let surface = self.instance.create_surface(&window)?;
self.ensure_render_state_for_surface(&surface).await?;
let alpha_mode = if self.support_transparent_backbuffer {
let supported_alpha_modes = surface
.get_capabilities(self.adapter.as_ref().unwrap())
.alpha_modes;
// Prefer pre multiplied over post multiplied!
if supported_alpha_modes.contains(&wgpu::CompositeAlphaMode::PreMultiplied) {
wgpu::CompositeAlphaMode::PreMultiplied
} else if supported_alpha_modes
.contains(&wgpu::CompositeAlphaMode::PostMultiplied)
{
wgpu::CompositeAlphaMode::PostMultiplied
} else {
tracing::warn!("Transparent window was requested, but the active wgpu surface does not support a `CompositeAlphaMode` with transparency.");
wgpu::CompositeAlphaMode::Auto
}
} else {
wgpu::CompositeAlphaMode::Auto
};
let size = window.inner_size();
self.surface_state = Some(SurfaceState {
surface,
width: size.width,
height: size.height,
alpha_mode,
});
self.resize_and_generate_depth_texture_view_and_msaa_view(size.width, size.height);
}
None => {
self.surface_state = None;
}
}
Ok(())
}
/// Returns the maximum texture dimension supported if known
///
/// This API will only return a known dimension after `set_window()` has been called
/// at least once, since the underlying device and render state are initialized lazily
/// once we have a window (that may determine the choice of adapter/device).
pub fn max_texture_side(&self) -> Option<usize> {
self.render_state
.as_ref()
.map(|rs| rs.device.limits().max_texture_dimension_2d as usize)
}
fn resize_and_generate_depth_texture_view_and_msaa_view(
&mut self,
width_in_pixels: u32,
height_in_pixels: u32,
) {
let render_state = self.render_state.as_ref().unwrap();
let surface_state = self.surface_state.as_mut().unwrap();
surface_state.width = width_in_pixels;
surface_state.height = height_in_pixels;
Self::configure_surface(surface_state, render_state, self.configuration.present_mode);
self.depth_texture_view = self.depth_format.map(|depth_format| {
render_state
.device
.create_texture(&wgpu::TextureDescriptor {
label: Some("egui_depth_texture"),
size: wgpu::Extent3d {
width: width_in_pixels,
height: height_in_pixels,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: depth_format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[depth_format],
})
.create_view(&wgpu::TextureViewDescriptor::default())
});
self.msaa_texture_view = (self.msaa_samples > 1)
.then_some(self.render_state.as_ref())
.flatten()
.map(|render_state| {
let texture_format = render_state.target_format;
render_state
.device
.create_texture(&wgpu::TextureDescriptor {
label: Some("egui_msaa_texture"),
size: wgpu::Extent3d {
width: width_in_pixels,
height: height_in_pixels,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: self.msaa_samples,
dimension: wgpu::TextureDimension::D2,
format: texture_format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[texture_format],
})
.create_view(&wgpu::TextureViewDescriptor::default())
});
}
pub fn on_window_resized(&mut self, width_in_pixels: u32, height_in_pixels: u32) {
if self.surface_state.is_some() {
self.resize_and_generate_depth_texture_view_and_msaa_view(
width_in_pixels,
height_in_pixels,
);
} else {
error!("Ignoring window resize notification with no surface created via Painter::set_window()");
}
}
// CaptureState only needs to be updated when the size of the two textures doesn't match and we want to
// capture a frame
fn update_capture_state(
screen_capture_state: &mut Option<CaptureState>,
surface_texture: &wgpu::SurfaceTexture,
render_state: &RenderState,
) {
let surface_texture = &surface_texture.texture;
match screen_capture_state {
Some(capture_state) => {
if capture_state.texture.size() != surface_texture.size() {
*capture_state = CaptureState::new(&render_state.device, surface_texture);
}
}
None => {
*screen_capture_state =
Some(CaptureState::new(&render_state.device, surface_texture));
}
}
}
// Handles copying from the CaptureState texture to the surface texture and the cpu
fn read_screen_rgba(
screen_capture_state: &CaptureState,
render_state: &RenderState,
output_frame: &wgpu::SurfaceTexture,
) -> Option<epaint::ColorImage> {
let CaptureState {
texture: tex,
buffer,
padding,
} = screen_capture_state;
let device = &render_state.device;
let queue = &render_state.queue;
let tex_extent = tex.size();
let mut encoder = device.create_command_encoder(&Default::default());
encoder.copy_texture_to_buffer(
tex.as_image_copy(),
wgpu::ImageCopyBuffer {
buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(std::num::NonZeroU32::new(padding.padded_bytes_per_row)?),
rows_per_image: None,
},
},
tex_extent,
);
encoder.copy_texture_to_texture(
tex.as_image_copy(),
output_frame.texture.as_image_copy(),
tex.size(),
);
let id = queue.submit(Some(encoder.finish()));
let buffer_slice = buffer.slice(..);
let (sender, receiver) = std::sync::mpsc::channel();
buffer_slice.map_async(wgpu::MapMode::Read, move |v| {
drop(sender.send(v));
});
device.poll(wgpu::Maintain::WaitForSubmissionIndex(id));
receiver.recv().ok()?.ok()?;
let to_rgba = match tex.format() {
wgpu::TextureFormat::Rgba8Unorm => [0, 1, 2, 3],
wgpu::TextureFormat::Bgra8Unorm => [2, 1, 0, 3],
_ => {
tracing::error!("Screen can't be captured unless the surface format is Rgba8Unorm or Bgra8Unorm. Current surface format is {:?}", tex.format());
return None;
}
};
let mut pixels = Vec::with_capacity((tex.width() * tex.height()) as usize);
for padded_row in buffer_slice
.get_mapped_range()
.chunks(padding.padded_bytes_per_row as usize)
{
let row = &padded_row[..padding.unpadded_bytes_per_row as usize];
for color in row.chunks(4) {
pixels.push(epaint::Color32::from_rgba_premultiplied(
color[to_rgba[0]],
color[to_rgba[1]],
color[to_rgba[2]],
color[to_rgba[3]],
));
}
}
buffer.unmap();
Some(epaint::ColorImage {
size: [tex.width() as usize, tex.height() as usize],
pixels,
})
}
// Returns the frame's pixel data as an `epaint::ColorImage` if a capture was requested.
pub fn paint_and_update_textures(
&mut self,
pixels_per_point: f32,
clear_color: [f32; 4],
clipped_primitives: &[epaint::ClippedPrimitive],
textures_delta: &epaint::textures::TexturesDelta,
capture: bool,
) -> Option<epaint::ColorImage> {
crate::profile_function!();
let render_state = self.render_state.as_mut()?;
let surface_state = self.surface_state.as_ref()?;
let output_frame = {
crate::profile_scope!("get_current_texture");
// This is where the vsync-waiting happens, at least on Mac.
surface_state.surface.get_current_texture()
};
let output_frame = match output_frame {
Ok(frame) => frame,
#[allow(clippy::single_match_else)]
Err(e) => match (*self.configuration.on_surface_error)(e) {
SurfaceErrorAction::RecreateSurface => {
Self::configure_surface(
surface_state,
render_state,
self.configuration.present_mode,
);
return None;
}
SurfaceErrorAction::SkipFrame => {
return None;
}
},
};
let mut encoder =
render_state
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("encoder"),
});
// Upload all resources for the GPU.
let screen_descriptor = renderer::ScreenDescriptor {
size_in_pixels: [surface_state.width, surface_state.height],
pixels_per_point,
};
let user_cmd_bufs = {
let mut renderer = render_state.renderer.write();
for (id, image_delta) in &textures_delta.set {
renderer.update_texture(
&render_state.device,
&render_state.queue,
*id,
image_delta,
);
}
renderer.update_buffers(
&render_state.device,
&render_state.queue,
&mut encoder,
clipped_primitives,
&screen_descriptor,
)
};
{
let renderer = render_state.renderer.read();
let frame_view = if capture {
Self::update_capture_state(
&mut self.screen_capture_state,
&output_frame,
render_state,
);
self.screen_capture_state
.as_ref()?
.texture
.create_view(&wgpu::TextureViewDescriptor::default())
} else {
output_frame
.texture
.create_view(&wgpu::TextureViewDescriptor::default())
};
let (view, resolve_target) = (self.msaa_samples > 1)
.then_some(self.msaa_texture_view.as_ref())
.flatten()
.map_or((&frame_view, None), |texture_view| {
(texture_view, Some(&frame_view))
});
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: clear_color[0] as f64,
g: clear_color[1] as f64,
b: clear_color[2] as f64,
a: clear_color[3] as f64,
}),
store: true,
},
})],
depth_stencil_attachment: self.depth_texture_view.as_ref().map(|view| {
wgpu::RenderPassDepthStencilAttachment {
view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: true,
}),
stencil_ops: None,
}
}),
label: Some("egui_render"),
});
renderer.render(&mut render_pass, clipped_primitives, &screen_descriptor);
}
{
let mut renderer = render_state.renderer.write();
for id in &textures_delta.free {
renderer.free_texture(id);
}
}
let encoded = {
crate::profile_scope!("CommandEncoder::finish");
encoder.finish()
};
// Submit the commands: both the main buffer and user-defined ones.
{
crate::profile_scope!("Queue::submit");
render_state
.queue
.submit(user_cmd_bufs.into_iter().chain(std::iter::once(encoded)));
};
let screenshot = if capture {
let screen_capture_state = self.screen_capture_state.as_ref()?;
Self::read_screen_rgba(screen_capture_state, render_state, &output_frame)
} else {
None
};
// Redraw egui
{
crate::profile_scope!("present");
output_frame.present();
}
screenshot
}
#[allow(clippy::unused_self)]
pub fn destroy(&mut self) {
// TODO(emilk): something here?
}
}