From b0f651fecbaa88762c75a07f655a84c0e9f87310 Mon Sep 17 00:00:00 2001 From: robtfm <50659922+robtfm@users.noreply.github.com> Date: Wed, 22 Jun 2022 00:36:29 +0100 Subject: [PATCH] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 114d169dcec0e426e7815f184c0f85e67713c95b Author: Robert Swain Date: Tue Jun 21 20:50:06 2022 +0000 Callable PBR functions (#4939) # Objective - Builds on top of #4938 - Make clustered-forward PBR lighting/shadows functionality callable - See #3969 for details ## Solution - Add `PbrInput` struct type containing a `StandardMaterial`, occlusion, world_position, world_normal, and frag_coord - Split functionality to calculate the unit view vector, and normal-mapped normal into `bevy_pbr::pbr_functions` - Split high-level shading flow into `pbr(in: PbrInput, N: vec3, V: vec3, is_orthographic: bool)` function in `bevy_pbr::pbr_functions` - Rework `pbr.wgsl` fragment stage entry point to make use of the new functions - This has been benchmarked on an M1 Max using `many_cubes -- sphere`. `main` had a median frame time of 15.88ms, this PR 15.99ms, which is a 0.69% frame time increase, which is within noise in my opinion. --- ## Changelog - Added: PBR shading code is now callable. Import `bevy_pbr::pbr_functions` and its dependencies, create a `PbrInput`, calculate the unit view and normal-mapped normal vectors and whether the projection is orthographic, and call `pbr()`! commit c98826418027362e6f58fae47ad18b9df7ab2df5 Author: James Liu Date: Tue Jun 21 20:35:26 2022 +0000 Mark mutable APIs under ECS storage as pub(crate) (#5065) # Objective Closes #1557. Partially addresses #3362. Cleanup the public facing API for storage types. Most of these APIs are difficult to use safely when directly interfacing with these types, and is also currently impossible to interact with in normal ECS use as there is no `World::storages_mut`. The majority of these types should be easy enough to read, and perhaps mutate the contents, but never structurally altered without the same checks in the rest of bevy_ecs code. This both cleans up the public facing types and helps use unused code detection to remove a few of the APIs we're not using internally. ## Solution - Mark all APIs that take `&mut T` under `bevy_ecs::storage` as `pub(crate)` or `pub(super)` - Cleanup after it all. Entire type visibility changes: - `BlobVec` is `pub(super)`, only storage code should be directly interacting with it. - `SparseArray` is now `pub(crate)` for the entire type. It's an implementation detail for `Table` and `(Component)SparseSet`. - `TableMoveResult` is now `pub(crate) --- ## Changelog TODO ## Migration Guide Dear God, I hope not. commit 389df183433c85520f7aeda7a46e6eb26859fd72 Author: James Liu Date: Tue Jun 21 18:10:27 2022 +0000 Change check_visibility to use thread-local queues instead of a channel (#4663) # Objective Further speed up visibility checking by removing the main sources of contention for the system. ## Solution - ~~Make `ComputedVisibility` a resource wrapping a `FixedBitset`.~~ - ~~Remove `ComputedVisibility` as a component.~~ ~~This adds a one-bit overhead to every entity in the app world. For a game with 100,000 entities, this is 12.5KB of memory. This is still small enough to fit entirely in most L1 caches. Also removes the need for a per-Entity change detection tick. 
This reduces the memory footprint of ComputedVisibility 72x.~~ ~~The decreased memory usage and less fragmented memory locality should provide significant performance benefits.~~ ~~Clearing visible entities should be significantly faster than before:~~ - ~~Setting one `u32` to 0 clears 32 entities per cycle.~~ - ~~No archetype fragmentation to contend with.~~ - ~~Change detection is applied to the resource, so there is no per-Entity update tick requirement.~~ ~~The side benefit of this design is that it removes one more "computed component" from userspace. Though accessing the values within it are now less ergonomic.~~ This PR changes `crossbeam_channel` in `check_visibility` to use a `Local>>` to mark down visible entities instead. Co-Authored-By: TheRawMeatball Co-Authored-By: Aevyrie commit 511bcc963335314c9f655c49faa273b43bac02b6 Author: Federico Rinaldi Date: Tue Jun 21 15:29:22 2022 +0000 Improve entity and component API docs (#4767) # Objective The descriptions included in the API docs of `entity` module, `Entity` struct, and `Component` trait have some issues: 1. the concept of entity is not clearly defined, 2. descriptions are a little bit out of place, 3. in a case the description leak too many details about the implementation, 4. some descriptions are not exhaustive, 5. there are not enough examples, 6. the content can be formatted in a much better way. ## Solution 1. ~~Stress the fact that entity is an abstract and elementary concept. Abstract because the concept of entity is not hardcoded into the library but emerges from the interaction of `Entity` with every other part of `bevy_ecs`, like components and world methods. Elementary because it is a fundamental concept that cannot be defined with other terms (like point in euclidean geometry, or time in classical physics).~~ We decided to omit the definition of entity in the API docs ([see why]). It is only described in its relationship with components. 2. Information has been moved to relevant places and links are used instead in the other places. 3. Implementation details about `Entity` have been reduced. 4. Descriptions have been made more exhaustive by stating how to obtain and use items. Entity operations are enriched with `World` methods. 5. Examples have been added or enriched. 6. Sections have been added to organize content. Entity operations are now laid out in a table. ### Todo list - [x] Break lines at sentence-level. ## For reviewers - ~~I added a TODO over `Component` docs, make sure to check it out and discuss it if necessary.~~ ([Resolved]) - You can easily check the rendered documentation by doing `cargo doc -p bevy_ecs --no-deps --open`. [see why]: https://github.com/bevyengine/bevy/pull/4767#discussion_r875106329 [Resolved]: https://github.com/bevyengine/bevy/pull/4767#discussion_r874127825 commit c4fc5d88f0ba2d86ff6c9a489154e4ca29666e3b Author: Félix Lescaudey de Maneville Date: Mon Jun 20 20:32:19 2022 +0000 Fixed bevy_ui touch input (#4099) # Objective `bevy_ui` doesn't support correctly touch inputs because of two problems in the focus system: - It attempts to retrieve touch input with a specific `0` id - It doesn't retrieve touch positions and bases its focus solely on mouse position, absent from mobile devices ## Solution I added a few methods to the `Touches` resource, allowing to check if **any** touch input was pressed, released or cancelled and to retrieve the *position* of the first pressed touch input and adapted the focus system. 
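A rough sketch of the idea (a hypothetical helper; the `Touches` method names follow the description above and may not match the merged API exactly):
```rust
use bevy::prelude::*;

// Hypothetical helper: prefer the first pressed touch, and fall back to the
// mouse cursor as before. `first_pressed_position`/`any_just_pressed` are the
// method names suggested by the PR description and may differ in the final API.
fn pointer_position(touches: &Touches, windows: &Windows) -> Option<Vec2> {
    if touches.any_just_pressed() {
        return touches.first_pressed_position();
    }
    windows.get_primary().and_then(|window| window.cursor_position())
}

// Used from a focus-style system roughly like this:
fn ui_focus_system(touches: Res<Touches>, windows: Res<Windows>) {
    if let Some(position) = pointer_position(&touches, &windows) {
        // hit-test UI nodes against `position` here
        info!("pointer at {position:?}");
    }
}
```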
I added a test button to the *iOS* example and it works correctly on the emulator. I did not test on a real touch device as:
- Android is not working (https://github.com/bevyengine/bevy/issues/3249)
- I don't have an iOS device

commit 30ca97e287de91b3fe6b4d5b1ce27126c1e3f73a Author: Domi Date: Mon Jun 20 19:06:38 2022 +0000 Fix Nix section of linux_dependencies.md (#5050)
# Objective
`nix-shell` reported: ```error: 'x11' has been renamed to/replaced by 'xlibsWrapper'```.
## Solution
Replacing `x11` with `xlibsWrapper` in the Nix section of linux_dependencies.md fixes the problem on my system, and bevy projects build fine.

commit 984ce3fa2289aaa680d820675f748d0858732780 Author: Hoidigan <57080125+Hoidigan@users.noreply.github.com> Date: Mon Jun 20 18:31:46 2022 +0000 Add `Input::reset_all` (#5015)
Adds a `reset_all` method to reset `pressed`, `just_pressed`, and `just_released` on the `Input`. Fixes #3383

commit 9089c8b73e26c1d9f1054d23d9f762e49e04b1d9 Author: Mark Lodato Date: Mon Jun 20 18:04:31 2022 +0000 Fix redundant "have" in CONTRIBUTING (#5036)
**This Commit**
1. Makes it so the sentence doesn't read "are contributors who have Have actively ..."
2. Makes it so all three bullet points end in punctuation
**Notes**
Could also remove the leading "Have" from all bullet points and leave it on the previous sentence. That's the least redundant, but I guess this is more flexible if we want to add a sentence that doesn't start with "Have" later.

commit 515c8a3f505e43b29ef6ef584cbd26d62cb357ed Author: Mark Lodato Date: Mon Jun 20 18:04:29 2022 +0000 Update `clap` to 3.2 in tools using `value_parser` (#5031)
**Why?**
The `value_parser` `clap` attribute was added in [version 3.2.0][0]. With the current version of `3.1.12` users can get errors like:
```
error: unexpected attribute: value_parser
  --> tools/spancmp/src/main.rs:18:25
   |
18 |     #[clap(short, long, value_parser, default_value_t = 0.0)]
   |                         ^^^^^^^^^^^^
```
See https://github.com/bevyengine/bevy/pull/4944#issuecomment-1157704785 for more details.
[0]: https://github.com/clap-rs/clap/blob/master/CHANGELOG.md#320---2022-06-13

commit 2ec5ff9652297ec29d8bb502fe2ac24c7e2fefe6 Author: Hoidigan <57080125+Hoidigan@users.noreply.github.com> Date: Mon Jun 20 17:35:56 2022 +0000 Add a `release_all` function to `Input`. (#5011)
Adds a `release_all` function to `Input` that releases all of the currently pressed inputs and marks them as just released.

commit 3217f216aaf18e855ad925079098c6c0535cf55d Author: Mike Date: Mon Jun 20 17:35:55 2022 +0000 change panicking test to not run on global task pool (#4998)
# Objective
- Fixes #4996
## Solution
- Panicking on the global task pool is probably bad. This changes the panicking test to use a single threaded stage to run the test instead.
- I checked the other #[should_panic] tests.
- I also added explicit ordering between the transform propagate system and the parent update system. The ambiguous ordering didn't seem to be causing problems, but the tests are probably more correct this way. The plugins that add these systems have an explicit ordering. I can remove this if necessary.
## Note
I don't have a 100% mental model of why panicking is causing intermittent failures. It probably has to do with a task for one of the other tests landing on the panicking thread when it actually panics. Why this causes a problem I'm not sure, but this PR seems to fix things.
## Open questions
- there are some other #[should_panic] tests that run on the task pool in stage.rs.
I don't think we restart panicked threads, so this might be killing most of the threads on the pool. But since they're not causing test failures, we should probably decide what to do about that separately. The solution in this PR won't work since those tests are explicitly testing parallelism. commit 92ea73036225ae47322b99577542afdaa58517d3 Author: JoJoJet Date: Mon Jun 20 17:35:54 2022 +0000 Add benchmarks for schedule dependency resolution (#4961) # Objective - Add benchmarks to test the performance of `Schedule`'s system dependency resolution. ## Solution - Do a series of benchmarks while increasing the number of systems in the schedule to see how the run-time scales. - Split the benchmarks into a group with no dependencies, and a group with many dependencies. commit 218b0fd3b6edebab28d8e74498ef9ba97c88c55e Author: Jakob Hellermann Date: Mon Jun 20 17:18:58 2022 +0000 `bevy_reflect`: put `serialize` into external `ReflectSerialize` type (#4782) builds on top of #4780 # Objective `Reflect` and `Serialize` are currently very tied together because `Reflect` has a `fn serialize(&self) -> Option>` method. Because of that, we can either implement `Reflect` for types like `Option` with `T: Serialize` and have `fn serialize` be implemented, or without the bound but having `fn serialize` return `None`. By separating `ReflectSerialize` into a separate type (like how it already is for `ReflectDeserialize`, `ReflectDefault`), we could separately `.register::>()` and `.register_data::, ReflectSerialize>()` only if the type `T: Serialize`. This PR does not change the registration but allows it to be changed in a future PR. ## Solution - add the type ```rust struct ReflectSerialize { .. } impl FromType for ReflectSerialize { .. } ``` - remove `#[reflect(Serialize)]` special casing. - when serializing reflect value types, look for `ReflectSerialize` in the `TypeRegistry` instead of calling `value.serialize()` commit bb1d5248339d7a8508c9f794a03b2553e61d17cb Author: François Date: Mon Jun 20 17:02:25 2022 +0000 Cleanups in diagnostics (#3871) - changed `EntityCountDiagnosticsPlugin` to not use an exclusive system to get its entity count - removed mention of `WgpuResourceDiagnosticsPlugin` in example `log_diagnostics` as it doesn't exist anymore - added ability to enable, disable ~~or toggle~~ a diagnostic (fix #3767) - made diagnostic values lazy, so they are only computed if the diagnostic is enabled - do not log an average for diagnostics with only one value - removed `sum` function from diagnostic as it isn't really useful - ~~do not keep an average of the FPS diagnostic. it is already an average on the last 20 frames, so the average FPS was an average of the last 20 frames over the last 20 frames~~ - do not compute the FPS value as an average over the last 20 frames but give the actual "instant FPS" - updated log format to use variable capture - added some doc - the frame counter diagnostic value can be reseted to 0 commit 9095d2fb31114f36e6e00b3ae4f8d6fbd88ee5ac Author: Aevyrie Date: Mon Jun 20 11:19:58 2022 +0000 Physical viewport calculation fix (#5055) # Objective - Fixes early return when viewport is not set. This now matches the description of the function. ## Solution - Remove errant try `?`. 
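Returning to the `ReflectSerialize` split described a few commits above: the real plumbing lives in bevy_reflect and involves serde, but the underlying pattern (type data constructed via `FromType` and looked up from a registry at serialization time) can be illustrated with a self-contained toy that uses `ToString` in place of `Serialize`. Everything below is a simplified stand-in, not the actual bevy_reflect API:
```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

// Type data: "this type can be turned into a String", captured per concrete type.
#[derive(Clone)]
struct ReflectToString {
    to_string: fn(&dyn Any) -> Option<String>,
}

// Loosely mirrors how type data is constructed from a concrete type.
trait FromType<T> {
    fn from_type() -> Self;
}

impl<T: Any + ToString> FromType<T> for ReflectToString {
    fn from_type() -> Self {
        ReflectToString {
            to_string: |value| value.downcast_ref::<T>().map(ToString::to_string),
        }
    }
}

#[derive(Default)]
struct Registry {
    data: HashMap<TypeId, ReflectToString>,
}

impl Registry {
    // Only types that satisfy the bound ever get this data registered,
    // which is the point of moving it out of the main reflection trait.
    fn register_data<T: Any + ToString>(&mut self) {
        self.data
            .insert(TypeId::of::<T>(), <ReflectToString as FromType<T>>::from_type());
    }

    // At "serialization" time, look the type data up by TypeId instead of
    // calling a method on the reflected value itself.
    fn to_string(&self, value: &dyn Any) -> Option<String> {
        let data = self.data.get(&value.type_id())?;
        (data.to_string)(value)
    }
}

fn main() {
    let mut registry = Registry::default();
    registry.register_data::<u32>();
    assert_eq!(registry.to_string(&42u32), Some("42".to_string()));
    // `Vec<u8>` was never registered, so it simply has no string form here.
    assert_eq!(registry.to_string(&vec![1u8]), None);
}
```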
commit 8e8cbcc623b1e998788d70ac701ea9c50dc7c3c4 Author: François Date: Mon Jun 20 10:32:44 2022 +0000 gltf: do not import IoTaskPool in wasm (#5038) # Objective - Remove a warning when building for wasm ## Solution - Do not import the dependency when building for wasm commit d717c63d341739433755a0858e7a1b4bc575062f Author: François Date: Mon Jun 20 10:32:43 2022 +0000 enable optional dependencies to stay optional (#5023) # Objective - Optional dependencies were enabled by some features as a side effect. for example, enabling the `webgl` feature enables the `bevy_pbr` optional dependency ## Solution - Use the syntax introduced in rust 1.60 to specify weak dependency features: https://blog.rust-lang.org/2022/04/07/Rust-1.60.0.html#new-syntax-for-cargo-features > Weak dependency features tackle the second issue where the `"optional-dependency/feature-name"` syntax would always enable `optional-dependency`. However, often you want to enable the feature on the optional dependency only if some other feature has enabled the optional dependency. Starting in 1.60, you can add a ? as in `"package-name?/feature-name"` which will only enable the given feature if something else has enabled the optional dependency. commit 5dbb178d5d4459009c260293db0a7780f3e9b803 Author: Corey Farwell Date: Sun Jun 19 16:53:49 2022 +0000 Implement `Eq` and `PartialEq` for `MouseScrollUnit` (#5048) commit 8b27124a801d83a1e92f1136ebd2bc9752c3e9d7 Author: François Date: Sat Jun 18 07:41:54 2022 +0000 WGSL: use correct syntax for matrix access (#5039) # Objective - `.x` is not the correct syntax to access a column in a matrix in WGSL: https://www.w3.org/TR/WGSL/#matrix-access-expr - naga accepts it and translates it correctly, but it's not valid when shaders are kept as is and used directly in WGSL ## Solution - Use the correct syntax commit a62ff657fec0023ca7d2d2daefb4573fb2814571 Author: François Date: Fri Jun 17 22:34:58 2022 +0000 update hashbrown to 0.12 (#5035) # Objective - Update hashbrown to 0.12 ## Solution - Replace #4004 - As the 0.12 is already in Bevy dependency tree, it shouldn't be an issue to update - The exception for the 0.11 should be removed once https://github.com/zakarumych/gpu-descriptor/pull/21 is merged and released - Also removed a few exceptions that weren't needed anymore commit caa61c5fb7990a816766d0797a81a8a9ac07b3e8 Author: Robert Swain Date: Fri Jun 17 00:14:02 2022 +0000 bevy_render: Fix KTX2 UASTC format mapping (#4569) # Objective - KTX2 UASTC format mapping was incorrect. For some reason I had written it to map to a set of data formats based on the count of KTX2 sample information blocks, but the mapping should be done based on the channel type in the sample information. 
- This is a valid change pulled out from #4514 as the attempt to fix the array textures there was incorrect ## Solution - Fix the KTX2 UASTC `DataFormat` enum to contain the correct formats based on the channel types in section 3.10.2 of https://github.khronos.org/KTX-Specification/ (search for "Basis Universal UASTC Format") - Correctly map from the sample information channel type to `DataFormat` - Correctly configure transcoding and the resulting texture format based on the `DataFormat` --- ## Changelog - Fixed: KTX2 UASTC format handling commit 14ed3b30cbbc4ab51de8a7fafce6baff1eb62b73 Author: Arnav Choubey <56453634+x-52@users.noreply.github.com> Date: Thu Jun 16 13:20:37 2022 +0000 Add documentation comments to `bevy_window` (#4333) # Objective - Add documentation comments and `#![warn(missing_docs)]` to `bevy_window`. - Part of #3492 commit ab72c8368f542c516a494069752b369ba6c2d131 Author: François Date: Wed Jun 15 19:18:53 2022 +0000 Fix ron deprecation (#5021) # Objective - Update to fix `ron` deprecation commit 32cd9899c84bb42b3b560f3e3b88102babc8eec0 Author: Ben Reeves Date: Wed Jun 15 06:29:52 2022 +0000 bevy_render: Add `attributes` and `attributes_mut` methods to `Mesh`. (#3927) # Use Case Seems generally useful, but specifically motivated by my work on the [`bevy_datasize`](https://github.com/BGR360/bevy_datasize) crate. For that project, I'm implementing "heap size estimators" for all of the Bevy internal types. To do this accurately for `Mesh`, I need to get the lengths of all of the mesh's attribute vectors. Currently, in order to accomplish this, I am doing the following: * Checking all of the attributes that are mentioned in the `Mesh` class ([see here](https://github.com/BGR360/bevy_datasize/blob/0531ec2d026085a31e937b12d5ecf4109005e737/src/builtins/render/mesh.rs#L46-L54)) * Providing the user with an option to configure additional attributes to check ([see here](https://github.com/BGR360/bevy_datasize/blob/0531ec2d026085a31e937b12d5ecf4109005e737/src/config.rs#L7-L21)) This is both overly complicated and a bit wasteful (since I have to check every attribute name that I know about in case there are attributes set for it). Co-authored-by: Carter Anderson commit dc950a4d2ff45cb0b35f16ec271e45bf3b6e8545 Author: Alice Cecile Date: Tue Jun 14 16:14:33 2022 +0000 Fix broken `WorldCell` test (#5009) # Objective Fixes #5008. Aliasing references is allowed under Rust if and only if they are immutable. This logic applies to `WorldCell` as well. commit 915fa69b666eae1d04976995534ffae969973dde Author: Aevyrie Date: Tue Jun 14 02:07:40 2022 +0000 Parallel Frustum Culling (#4489) # Objective Working with a large number of entities with `Aabbs`, rendered with an instanced shader, I found the bottleneck became the frustum culling system. The goal of this PR is to significantly improve culling performance without any major changes. We should consider constructing a BVH for more substantial improvements. ## Solution - Convert the inner entity query to a parallel iterator with `par_for_each_mut` using a batch size of 1,024. - This outperforms single threaded culling when there are more than 1,000 entities. - Below this they are approximately equal, with <= 10 microseconds of multithreading overhead. - Above this, the multithreaded version is significantly faster, scaling linearly with core count. - In my million-entity-workload, this PR improves my framerate by 200% - 300%. ## log-log of `check_visibility` time vs. 
entities for single/multithreaded ![image](https://user-images.githubusercontent.com/2632925/163709007-7eab4437-e9f9-4c06-bac0-250073885110.png) --- ## Changelog Frustum culling is now run with a parallel query. When culling more than a thousand entities, this is faster than the previous method, scaling proportionally with the number of available cores. commit c6222f1acc4a5d4478b5cb188f0eda24afb6546a Author: Robert Swain Date: Tue Jun 14 00:58:30 2022 +0000 Separate out PBR lighting, shadows, clustered forward, and utils from pbr.wgsl (#4938) # Objective - Builds on top of #4901 - Separate out PBR lighting, shadows, clustered forward, and utils from `pbr.wgsl` as part of making the PBR code more reusable and extensible. - See #3969 for details. ## Solution - Add `bevy_pbr::utils`, `bevy_pbr::clustered_forward`, `bevy_pbr::lighting`, `bevy_pbr::shadows` shader imports exposing many shader functions for external use - Split `PI`, `saturate()`, `hsv2rgb()`, and `random1D()` into `bevy_pbr::utils` - Split clustered-forward-specific functions into `bevy_pbr::clustered_forward`, including moving the debug visualization code into a `cluster_debug_visualization()` function in that import - Split PBR lighting functions into `bevy_pbr::lighting` - Split shadow functions into `bevy_pbr::shadows` --- ## Changelog - Added: `bevy_pbr::utils`, `bevy_pbr::clustered_forward`, `bevy_pbr::lighting`, `bevy_pbr::shadows` shader imports exposing many shader functions for external use - Split `PI`, `saturate()`, `hsv2rgb()`, and `random1D()` into `bevy_pbr::utils` - Split clustered-forward-specific functions into `bevy_pbr::clustered_forward`, including moving the debug visualization code into a `cluster_debug_visualization()` function in that import - Split PBR lighting functions into `bevy_pbr::lighting` - Split shadow functions into `bevy_pbr::shadows` commit b333386271b38d96a78702172d522981004a1f1c Author: Robert Swain Date: Tue Jun 14 00:32:33 2022 +0000 Add reusable shader functions for transforming position/normal/tangent (#4901) # Objective - Add reusable shader functions for transforming positions / normals / tangents between local and world / clip space for 2D and 3D so that they are done in a simple and correct way - The next step in #3969 so check there for more details. ## Solution - Add `bevy_pbr::mesh_functions` and `bevy_sprite::mesh2d_functions` shader imports - These contain `mesh_` and `mesh2d_` versions of the following functions: - `mesh_position_local_to_world` - `mesh_position_world_to_clip` - `mesh_position_local_to_clip` - `mesh_normal_local_to_world` - `mesh_tangent_local_to_world` - Use them everywhere where it is appropriate - Notably not in the sprite and UI shaders where `mesh2d_position_world_to_clip` could have been used, but including all the functions depends on the mesh binding so I chose to not use the function there - NOTE: The `mesh_` and `mesh2d_` functions are currently identical. However, if I had defined only `bevy_pbr::mesh_functions` and used that in bevy_sprite, then bevy_sprite would have a runtime dependency on bevy_pbr, which seems undesirable. I also expect that when we have a proper 2D rendering API, these functions will diverge between 2D and 3D. 
--- ## Changelog - Added: `bevy_pbr::mesh_functions` and `bevy_sprite::mesh2d_functions` shader imports containing `mesh_` and `mesh2d_` versions of the following functions: - `mesh_position_local_to_world` - `mesh_position_world_to_clip` - `mesh_position_local_to_clip` - `mesh_normal_local_to_world` - `mesh_tangent_local_to_world` ## Migration Guide - The `skin_tangents` function from the `bevy_pbr::skinning` shader import has been replaced with the `mesh_tangent_local_to_world` function from the `bevy_pbr::mesh_functions` shader import commit 407c080e599e98274290d2fc89739ecde0303b84 Author: Boxy Date: Mon Jun 13 23:35:54 2022 +0000 Replace `ReadOnlyFetch` with `ReadOnlyWorldQuery` (#4626) # Objective - Fix a type inference regression introduced by #3001 - Make read only bounds on world queries more user friendly ptrification required you to write `Q::Fetch: ReadOnlyFetch` as `for<'w> QueryFetch<'w, Q>: ReadOnlyFetch` which has the same type inference problem as `for<'w> QueryFetch<'w, Q>: FilterFetch<'w>` had, i.e. the following code would error: ```rust #[derive(Component)] struct Foo; fn bar(a: Query<(&Foo, Without)>) { foo(a); } fn foo(a: Query) where for<'w> QueryFetch<'w, Q>: ReadOnlyFetch, { } ``` `for<..>` bounds are also rather user unfriendly.. ## Solution Remove the `ReadOnlyFetch` trait in favour of a `ReadOnlyWorldQuery` trait, and remove `WorldQueryGats::ReadOnlyFetch` in favor of `WorldQuery::ReadOnly` allowing the previous code snippet to be written as: ```rust #[derive(Component)] struct Foo; fn bar(a: Query<(&Foo, Without)>) { foo(a); } fn foo(a: Query) {} ``` This avoids the `for<...>` bound which makes the code simpler and also fixes the type inference issue. The reason for moving the two functions out of `FetchState` and into `WorldQuery` is to allow the world query `&mut T` to share a `State` with the `&T` world query so that it can have `type ReadOnly = &T`. Presumably it would be possible to instead have a `ReadOnlyRefMut` world query and then do `type ReadOnly = ReadOnlyRefMut` much like how (before this PR) we had a `ReadOnlyWriteFetch`. A side benefit of the current solution in this PR is that it will likely make it easier in the future to support an API such as `Query<&mut T> -> Query<&T>`. The primary benefit IMO is just that `ReadOnlyRefMut` and its associated fetch would have to reimplement all of the logic that the `&T` world query impl does but this solution avoids that :) --- ## Changelog/Migration Guide The trait `ReadOnlyFetch` has been replaced with `ReadOnlyWorldQuery` along with the `WorldQueryGats::ReadOnlyFetch` assoc type which has been replaced with `::Fetch` - Any where clauses such as `QueryFetch: ReadOnlyFetch` should be replaced with `Q: ReadOnlyWorldQuery`. - Any custom world query impls should implement `ReadOnlyWorldQuery` insead of `ReadOnlyFetch` Functions `update_component_access` and `update_archetype_component_access` have been moved from the `FetchState` trait to `WorldQuery` - Any callers should now call `Q::update_component_access(state` instead of `state.update_component_access` (and `update_archetype_component_access` respectively) - Any custom world query impls should move the functions from the `FetchState` impl to `WorldQuery` impl `WorldQuery` has been made an `unsafe trait`, `FetchState` has been made a safe `trait`. 
(I think this is how it should have always been, but regardless this is _definitely_ necessary now that the two functions have been moved to `WorldQuery`) - If you have a custom `FetchState` impl make it a normal `impl` instead of `unsafe impl` - If you have a custom `WorldQuery` impl make it an `unsafe impl`, if your code was sound before it is going to still be sound commit 4050c8aa310b9c482f387d10c42aa4b59f399749 Author: Simonas Kazlauskas Date: Mon Jun 13 22:40:29 2022 +0000 bevy_log: upgrade to tracing-tracy 0.10.0 (#4991) This upgrade should bring some significant performance improvements to instrumentation. These are mostly achieved by disabling features (by default) that are likely not widely used by default – collection of callstacks and support for fibers that wasn't used for anything in particular yet. For callstack collection it might be worthwhile to provide a mechanism to enable this at runtime by calling `TracyLayer::with_stackdepth`. These should bring the cost of a single span down from 30+µs per span to a more reasonable 1.5µs or so and down to the ns scale for events (on my 1st gen Ryzen machine, anyway.) There is still a fair amount of overhead over plain tracy_client instrumentation in formatting and such, but dealing with it requires significant effort and this is a straightforward improvement to have for the time being. Co-authored-by: Simonas Kazlauskas commit 0560aa58932bba5035fc7ebd0cdecfaa015dbb93 Author: Mike Date: Mon Jun 13 21:51:16 2022 +0000 Fix clap for CI (#5005) # Objective - Fix CI - relevant clap issue https://github.com/clap-rs/clap/issues/3822 ## Solution - slap `value_parser` on all the clap derives. This tells clap to use the default parser for the type. commit b7d784de6e27c46cc42231e1dea94c3ba4de9463 Author: Chris Dawkins Date: Sun Jun 12 19:34:26 2022 +0000 Bugfix `State::set` transition condition infinite loop (#4890) # Objective - Fixes #4271 ## Solution - Check for a pending transition in addition to a scheduled operation. - I don't see a valid reason for updating the state unless both `scheduled` and `transition` are empty. commit 5a09694dec4057e4c53351ef4de3fccf2252aaf3 Author: ickshonpe Date: Sun Jun 12 19:14:48 2022 +0000 Overflow::Hidden doesn't work correctly with scale_factor_override (#3854) # Objective Overflow::Hidden doesn't work correctly with scale_factor_override. If you run the Bevy UI example with scale_factor_override 3 you'll see half clipped text around the edges of the scrolling listbox. The problem seems to be that the corners of the node are transformed before the amount of clipping required is calculated. But then that transformed clip is compared to the original untransformed size of the node rect to see if it should be culled or not. With a higher scale factor the relative size of the untransformed node rect is going to be really big, so the overflow isn't culled. # Solution Multiply the size of the node rect by extracted_uinode.transform before the cull test. 
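A rough sketch of that change (a hypothetical helper, not the actual `bevy_ui` extraction code; the parameters stand in for what the extracted node provides):
```rust
use bevy::math::{Mat4, Vec2};

// Hypothetical sketch of the fix described above: scale the node's rect by the
// extracted transform *before* the cull test, so a scale_factor_override can no
// longer make the untransformed rect look too big to ever be culled.
fn is_culled(transform: Mat4, node_size: Vec2, center: Vec2, clip_min: Vec2, clip_max: Vec2) -> bool {
    // Bring the node size into the same (physical-pixel) space as the clip rect.
    let scaled_size = transform.transform_vector3(node_size.extend(0.0)).truncate().abs();
    let half = scaled_size / 2.0;
    // Cull only when the transformed rect lies entirely outside the clip rect.
    center.x + half.x < clip_min.x
        || center.x - half.x > clip_max.x
        || center.y + half.y < clip_min.y
        || center.y - half.y > clip_max.y
}
```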
commit f969c62f7bfaf1932fbb533dfd87467aeddf75cb Author: François Date: Sat Jun 11 20:10:13 2022 +0000 Fix wasm examples (#4967) # Objective Fix #4958 There was 4 issues: - this is not true in WASM and on macOS: https://github.com/bevyengine/bevy/blob/f28b92120920f387020f3b3e858f0e7039b9c07e/examples/3d/split_screen.rs#L90 - ~~I made sure the system was running at least once~~ - I'm sending the event on window creation - in webgl, setting a viewport has impacts on other render passes - only in webgl and when there is a custom viewport, I added a render pass without a custom viewport - shaderdef NO_ARRAY_TEXTURES_SUPPORT was not used by the 2d pipeline - webgl feature was used but not declared in bevy_sprite, I added it to the Cargo.toml - shaderdef NO_STORAGE_BUFFERS_SUPPORT was not used by the 2d pipeline - I added it based on the BufferBindingType The last commit changes the two last fixes to add the shaderdefs in the shader cache directly instead of needing to do it in each pipeline Co-authored-by: Carter Anderson commit 772d15238c2edd6eb6006a71e74bc64fddaca7f1 Author: Aevyrie Date: Sat Jun 11 09:13:37 2022 +0000 Change default `Image` `FilterMode` to `Linear` (#4465) # Objective - Closes #4464 ## Solution - Specify default mag and min filter types for `Image` instead of using `wgpu`'s defaults. --- ## Changelog ### Changed - Default `Image` filtering changed from `Nearest` to `Linear`. Co-authored-by: Carter Anderson commit 728d9696d73eef2675cce699fe1896f5c13701bd Author: François Date: Sat Jun 11 08:56:26 2022 +0000 fix nightly for miri to 2022-06-08 to avoid timeouts (#4984) # Objective - Fix timeout in miri ## Solution - Use a nightly version from before the issue happened: 2022-06-08 - To be checked after https://github.com/rust-lang/miri/issues/2223 is fixed commit 57b4620a7dc4acafc7ef00d5466caf4b09f9e3d2 Author: LoipesMas <46327403+LoipesMas@users.noreply.github.com> Date: Thu Jun 9 21:18:16 2022 +0000 Fix Good-First-Issue label in CONTRIBUTING.md (#4979) # Objective - CONTRIBUTING.md references wrong issue label, making it potentially confusing for new contributors. ## Solution - Update CONTRIBUTING.md. --- I assume the label was changed recently. commit e6f34ba47f5d8face2f154dce7bc93bc034017fb Author: Gino Valente Date: Thu Jun 9 21:18:15 2022 +0000 bevy_reflect: Add statically available type info for reflected types (#4042) # Objective > Resolves #4504 It can be helpful to have access to type information without requiring an instance of that type. Especially for `Reflect`, a lot of the gathered type information is known at compile-time and should not necessarily require an instance. ## Solution Created a dedicated `TypeInfo` enum to store static type information. All types that derive `Reflect` now also implement the newly created `Typed` trait: ```rust pub trait Typed: Reflect { fn type_info() -> &'static TypeInfo; } ``` > Note: This trait was made separate from `Reflect` due to `Sized` restrictions. If you only have access to a `dyn Reflect`, just call `.get_type_info()` on it. This new trait method on `Reflect` should return the same value as if you had called it statically. If all you have is a `TypeId` or type name, you can get the `TypeInfo` directly from the registry using the `TypeRegistry::get_type_info` method (assuming it was registered). ### Usage Below is an example of working with `TypeInfo`. As you can see, we don't have to generate an instance of `MyTupleStruct` in order to get this information. 
```rust #[derive(Reflect)] struct MyTupleStruct(usize, i32, MyStruct); let info = MyTupleStruct::type_info(); if let TypeInfo::TupleStruct(info) = info { assert!(info.is::()); assert_eq!(std::any::type_name::(), info.type_name()); assert!(info.field_at(1).unwrap().is::()); } else { panic!("Expected `TypeInfo::TupleStruct`"); } ``` ### Manual Implementations It's not recommended to manually implement `Typed` yourself, but if you must, you can use the `TypeInfoCell` to automatically create and manage the static `TypeInfo`s for you (which is very helpful for blanket/generic impls): ```rust use bevy_reflect::{Reflect, TupleStructInfo, TypeInfo, UnnamedField}; use bevy_reflect::utility::TypeInfoCell; struct Foo(T); impl Typed for Foo { fn type_info() -> &'static TypeInfo { static CELL: TypeInfoCell = TypeInfoCell::generic(); CELL.get_or_insert::(|| { let fields = [UnnamedField::new::()]; let info = TupleStructInfo::new::(&fields); TypeInfo::TupleStruct(info) }) } } ``` ## Benefits One major benefit is that this opens the door to other serialization methods. Since we can get all the type info at compile time, we can know how to properly deserialize something like: ```rust #[derive(Reflect)] struct MyType { foo: usize, bar: Vec } // RON to be deserialized: ( type: "my_crate::MyType", // <- We now know how to deserialize the rest of this object value: { // "foo" is a value type matching "usize" "foo": 123, // "bar" is a list type matching "Vec" with item type "String" "bar": ["a", "b", "c"] } ) ``` Not only is this more compact, but it has better compatibility (we can change the type of `"foo"` to `i32` without having to update our serialized data). Of course, serialization/deserialization strategies like this may need to be discussed and fully considered before possibly making a change. However, we will be better equipped to do that now that we can access type information right from the registry. ## Discussion Some items to discuss: 1. Duplication. There's a bit of overlap with the existing traits/structs since they require an instance of the type while the type info structs do not (for example, `Struct::field_at(&self, index: usize)` and `StructInfo::field_at(&self, index: usize)`, though only `StructInfo` is accessible without an instance object). Is this okay, or do we want to handle it in another way? 2. Should `TypeInfo::Dynamic` be removed? Since the dynamic types don't have type information available at runtime, we could consider them `TypeInfo::Value`s (or just even just `TypeInfo::Struct`). The intention with `TypeInfo::Dynamic` was to keep the distinction from these dynamic types and actual structs/values since users might incorrectly believe the methods of the dynamic type's info struct would map to some contained data (which isn't possible statically). 4. General usefulness of this change, including missing/unnecessary parts. 5. Possible changes to the scene format? (One possible issue with changing it like in the example above might be that we'd have to be careful when handling generic or trait object types.) ## Compile Tests I ran a few tests to compare compile times (as suggested [here](https://github.com/bevyengine/bevy/pull/4042#discussion_r876408143)). I toggled `Reflect` and `FromReflect` derive macros using `cfg_attr` for both this PR (aa5178e7736a6f8252e10e543e52722107649d3f) and main (c309acd4322b1c3b2089e247a2d28b938eb7b56d).
The test project included 250 of the following structs (as well as a few other structs):
```rust
#[derive(Default)]
#[cfg_attr(feature = "reflect", derive(Reflect))]
#[cfg_attr(feature = "from_reflect", derive(FromReflect))]
pub struct Big001 {
    inventory: Inventory,
    foo: usize,
    bar: String,
    baz: ItemDescriptor,
    items: [Item; 20],
    hello: Option,
    world: HashMap,
    okay: (isize, usize, /* wesize */),
    nope: ((String, String), (f32, f32)),
    blah: Cow<'static, str>,
}
```
> I don't know if the compiler can optimize all these duplicate structs away, but I think it's fine either way. We're comparing times, not finding the absolute worst-case time.

I only ran each build 3 times using `cargo build --timings` (thank you @devil-ira), each of which was preceded by a `cargo clean --package bevy_reflect_compile_test`. Here are the times I got:

| Test | Test 1 | Test 2 | Test 3 | Average |
| -------------------------------- | ------ | ------ | ------ | ------- |
| Main | 1.7s | 3.1s | 1.9s | 2.33s |
| Main + `Reflect` | 8.3s | 8.6s | 8.1s | 8.33s |
| Main + `Reflect` + `FromReflect` | 11.6s | 11.8s | 13.8s | 12.4s |
| PR | 3.5s | 1.8s | 1.9s | 2.4s |
| PR + `Reflect` | 9.2s | 8.8s | 9.3s | 9.1s |
| PR + `Reflect` + `FromReflect` | 12.9s | 12.3s | 12.5s | 12.56s |
--- ## Future Work Even though everything could probably be made `const`, we unfortunately can't. This is because `TypeId::of::()` is not yet `const` (see https://github.com/rust-lang/rust/issues/77125). When it does get stabilized, it would probably be worth coming back and making things `const`. Co-authored-by: MrGVSV <49806985+MrGVSV@users.noreply.github.com> commit 1679a9973825e5241ca91f33919e63667d4eb421 Author: Peter Hebden Date: Thu Jun 9 20:57:43 2022 +0000 Fix typo in game_menu.rs (#4977) Should be trivial # Objective There is a typo in a comment that is fixed in this commit ## Solution Fix the typo `positionned` -> `positioned` --- commit c6958b3056edfb7669da41ff9da103c379d97617 Author: François Date: Thu Jun 9 20:34:09 2022 +0000 add a `SceneBundle` to spawn a scene (#2424) # Objective - Spawning a scene is handled as a special case with a command `spawn_scene` that takes an handle but doesn't let you specify anything else. This is the only handle that works that way. - Workaround for this have been to add the `spawn_scene` on `ChildBuilder` to be able to specify transform of parent, or to make the `SceneSpawner` available to be able to select entities from a scene by their instance id ## Solution Add a bundle ```rust pub struct SceneBundle { pub scene: Handle, pub transform: Transform, pub global_transform: GlobalTransform, pub instance_id: Option, } ``` and instead of ```rust commands.spawn_scene(asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0")); ``` you can do ```rust commands.spawn_bundle(SceneBundle { scene: asset_server.load("models/FlightHelmet/FlightHelmet.gltf#Scene0"), ..Default::default() }); ``` The scene will be spawned as a child of the entity with the `SceneBundle` ~I would like to remove the command `spawn_scene` in favor of this bundle but didn't do it yet to get feedback first~ Co-authored-by: François <8672791+mockersf@users.noreply.github.com> Co-authored-by: Carter Anderson commit cdb62af4bf52f160399bd01936cf3fc5b7afbd81 Author: James Liu Date: Thu Jun 9 03:34:51 2022 +0000 Replace ComponentSparseSet's internals with a Column (#4909) # Objective Following #4855, `Column` is just a parallel `BlobVec`/`Vec>` pair, which is identical to the dense and ticks vecs in `ComponentSparseSet`, which has some code duplication with `Column`. ## Solution Replace dense and ticks in `ComponentSparseSet` with a `Column`. commit f2b545049c9696bdce388e939022190c401a0dbc Author: James Liu Date: Thu Jun 9 03:19:31 2022 +0000 Implement FusedIterator for eligible Iterator types (#4942) # Objective Most of our `Iterator` impls satisfy the requirements of `std::iter::FusedIterator`, which has internal specialization that optimizes `Interator::fuse`. The std lib iterator combinators do have a few that rely on `fuse`, so this could optimize those use cases. I don't think we're using any of them in the engine itself, but beyond a light increase in compile time, it doesn't hurt to implement the trait. ## Solution Implement the trait for all eligible iterators in first party crates. Also add a missing `ExactSizeIterator` on an iterator that could use it. commit 012ae07dc87d43a7437bd4200b4a7f04bf99dbf3 Author: James Liu Date: Thu Jun 9 02:43:24 2022 +0000 Add global init and get accessors for all newtyped TaskPools (#2250) Right now, a direct reference to the target TaskPool is required to launch tasks on the pools, despite the three newtyped pools (AsyncComputeTaskPool, ComputeTaskPool, and IoTaskPool) effectively acting as global instances. 
The need to pass a TaskPool reference adds notable friction to spawning subtasks within existing tasks. Possible use cases for this may include chaining tasks within the same pool like spawning separate send/receive I/O tasks after waiting on a network connection to be established, or allowing cross-pool dependent tasks like starting dependent multi-frame computations following a long I/O load. Other task execution runtimes provide static access to spawning tasks (i.e. `tokio::spawn`), which is notably easier to use than the reference passing required by `bevy_tasks` right now. This PR does the following:
* Adds `*TaskPool::init`, which initializes a `OnceCell` with a provided TaskPool, failing if the pool has already been initialized.
* Adds `*TaskPool::get`, which fetches the initialized global pool of the respective type or panics. This generally should not be an issue in normal Bevy use, as the pools are initialized before they are accessed.
* Updated default task pool initialization to either pull the global handles and save them as resources, or, if they are already initialized, pull a cloned global handle as the resource.

This should make it notably easier to build more complex task hierarchies for dependent tasks. It should also make writing bevy-adjacent, but not strictly bevy-only, plugin crates easier, as the global pools ensure it's all running on the same threads. One alternative considered is keeping a thread-local reference to the pool for all threads in each pool to enable the same `tokio::spawn` interface. This would spawn tasks on the same pool that a task is currently running in. However, this leads to potential footgun situations where long-running blocking tasks run on `ComputeTaskPool`.

commit 5ace79ff09f47a41934b6efc299891706cf5cc89 Author: Cai Bingjun <62678643+C-BJ@users.noreply.github.com> Date: Wed Jun 8 17:55:57 2022 +0000 Let the project page support GitHub's new ability to display open source licenses (#4966)
Change _LICENSE-APACHE_ and _LICENSE-MIT_ file location. Delete _LICENSE_. You can make the license in the About section of bevy's GitHub page display as **Apache-2.0, MIT licenses found** instead of **View license**.

commit f28b92120920f387020f3b3e858f0e7039b9c07e Author: Carter Anderson Date: Tue Jun 7 22:22:10 2022 +0000 Add "depth_load_op" configuration to 3d Cameras (#4904)
# Objective
Users should be able to configure depth load operations on cameras. Currently every camera clears depth when it is rendered. But sometimes later passes need to rely on depth from previous passes.
## Solution
This adds the `Camera3d::depth_load_op` field with a new `Camera3dDepthLoadOp` value. This is a custom type because Camera3d uses "reverse-z depth" and this helps us record and document that in a discoverable way. It also gives us more control over reflection + other trait impls, whereas `LoadOp` is owned by the `wgpu` crate.
```rust commands.spawn_bundle(Camera3dBundle { camera_3d: Camera3d { depth_load_op: Camera3dDepthLoadOp::Load, ..default() }, ..default() }); ``` ### two_passes example with the "second pass" camera configured to the default (clear depth to 0.0) ![image](https://user-images.githubusercontent.com/2694663/171743172-46d4fdd5-5090-46ea-abe4-1fbc519f6ee8.png) ### two_passes example with the "second pass" camera configured to "load" the depth ![image](https://user-images.githubusercontent.com/2694663/171743323-74dd9a1d-9c25-4883-98dd-38ca0bed8c17.png) --- ## Changelog ### Added * `Camera3d` now has a `depth_load_op` field, which can configure the Camera's main 3d pass depth loading behavior. commit cbf032419d3a04959cc83d2df64c5f53eeda606e Author: Aevyrie Date: Tue Jun 7 15:23:45 2022 +0000 Refactor `Camera` methods and add viewport rect (#4948) While working on a refactor of `bevy_mod_picking` to include viewport-awareness, I found myself writing these functions to test if a cursor coordinate was inside the camera's rendered area. # Objective - Simplify conversion from physical to logical pixels - Add methods that returns the dimensions of the viewport as a min-max rect --- ## Changelog - Added `Camera::to_logical` - Added `Camera::physical_viewport_rect` - Added `Camera::logical_viewport_rect` commit d51a87cf2868596dffd7bf9cd0f77a4122165769 Author: Johan Klokkhammer Helsing Date: Tue Jun 7 08:14:10 2022 +0000 Recommend posting new plugins in #crates discord channel (#4956) # Objective - Guide people to the right discord channel to post about their new plugin. #showcase was split into multiple channels. ## Solution - recommend posting in #crates commit 73174730e44a9e2fea7f004e2b78581e8e33bddc Author: François Date: Tue Jun 7 02:16:47 2022 +0000 use the default() method in examples instead of Default::default() (#4952) # Objective - Use the `..default()` method in examples instead of `..Default::default()` commit 649e30de0974fbba8477c5c21e5bea24d3efe4cd Author: Danny Date: Tue Jun 7 02:02:53 2022 +0000 update system test example to include using events (#4951) # Objective - Adds an example of testing systems that handle events. I had a hard time figuring out how to do it a couple days ago so figured an official example could be useful. - Fixes #4936 ## Solution - Adds a `Score` resource and an `EnemyDied` event. An `update_score` system updates the score when a new event comes through. I'm not sure the example is great, as this probably isn't how you'd do it in a real game, but I didn't want to change the existing example too much. 
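A condensed, hedged sketch of what such a test can look like with the `App` API of this era (the `Score`, `EnemyDied`, and `update_score` names come from the description above; the actual example in the repository may differ):
```rust
use bevy::prelude::*;

#[derive(Default)]
struct Score(u32);

struct EnemyDied;

fn update_score(mut score: ResMut<Score>, mut events: EventReader<EnemyDied>) {
    // One point per EnemyDied event received this frame.
    score.0 += events.iter().count() as u32;
}

#[test]
fn score_increments_on_enemy_death() {
    let mut app = App::new();
    app.init_resource::<Score>()
        .add_event::<EnemyDied>()
        .add_system(update_score);

    // Send an event, then run the schedule once so the system sees it.
    app.world
        .get_resource_mut::<Events<EnemyDied>>()
        .unwrap()
        .send(EnemyDied);
    app.update();

    assert_eq!(app.world.get_resource::<Score>().unwrap().0, 1);
}
```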
commit 39ea1bb9b7f03e9803648b2c28c269ce365e56e4 Author: François Date: Mon Jun 6 20:22:51 2022 +0000 run examples in wasm in CI (#4818)
# Objective
- Run examples in WASM in CI
- Fix #4817
## Solution
- on feature `bevy_ci_testing`
  - add an extra log message before exiting
  - when building for wasm, read the CI config file at compile time
- add a simple [playwright](https://playwright.dev) test script that opens the browser then waits for the success log, and takes a screenshot
- add a CI job that runs the playwright test for Chromium and Firefox on one example (lighting) and saves the screenshots
  - Firefox screenshot is good (with some clusters visible)
  - Chromium screenshot is gray, I don't know why but it's logging `GPU stall due to ReadPixels`
  - Webkit is not enabled for now, to revisit once https://bugs.webkit.org/show_bug.cgi?id=234926 is fixed or worked around
- the CI job only runs on bors validation

example run: https://github.com/mockersf/bevy/actions/runs/2361673465. The screenshots can be downloaded

commit 193998b5d4f3f02d690f3b0fc475db9817ee56f6 Author: François Date: Mon Jun 6 20:00:30 2022 +0000 add NO_STORAGE_BUFFERS_SUPPORT shaderdef when needed (#4949)
# Objective
- fix #4946
- fix running 3d in wasm
## Solution
- since #4867, the imports are split differently, and this shader def was not always set correctly depending on the shader used
- add it when needed

commit 25219a4d18c262a4abf919d9fb73a957da5a028c Author: Wybe Westra Date: Mon Jun 6 17:52:09 2022 +0000 Add transparency examples (#3695)
Adds examples demonstrating transparency for 2d, 3d and UI. Fixes #3215.

commit 92ddfe8ad40dc8a0270fe01a1d7dec83850e5ee0 Author: ira Date: Mon Jun 6 16:09:16 2022 +0000 Add methods for querying lists of entities. (#4879)
# Objective
Improve querying ergonomics around collections and iterators of entities. An example of how queries over Children might be done currently:
```rust
fn system(foo_query: Query<(&Foo, &Children)>, bar_query: Query<(&Bar, &Children)>) {
    for (foo, children) in &foo_query {
        for child in children.iter() {
            if let Ok((bar, children)) = bar_query.get(*child) {
                for child in children.iter() {
                    if let Ok((foo, children)) = foo_query.get(*child) {
                        // D:
                    }
                }
            }
        }
    }
}
```
Answers #4868
Partially addresses #4864
Fixes #1470
## Solution
Based on the great work by @deontologician in #2563, added `iter_many` and `many_for_each_mut` to `Query`. These take a list of entities (anything that implements `IntoIterator>`). `iter_many` returns a `QueryManyIter` iterator over immutable results of a query (mutable data will be cast to an immutable form). `many_for_each_mut` calls a closure for every result of the query, ensuring no aliased mutability. This iterator goes over the list of entities in order and returns the result from the query for each, skipping over any entities that don't match the query. Also added `unsafe fn iter_many_unsafe`.
### Examples
```rust
#[derive(Component)]
struct Counter {
    value: i32,
}

#[derive(Component)]
struct Friends {
    list: Vec<Entity>,
}

fn system(
    friends_query: Query<&Friends>,
    mut counter_query: Query<&mut Counter>,
) {
    for friends in &friends_query {
        for counter in counter_query.iter_many(&friends.list) {
            println!("Friend's counter: {:?}", counter.value);
        }
        counter_query.many_for_each_mut(&friends.list, |mut counter| {
            counter.value += 1;
            println!("Friend's counter: {:?}", counter.value);
        });
    }
}
```
Here's how the example in the Objective section can be written with this PR.
```rust fn system(foo_query: Query<(&Foo, &Children)>, bar_query: Query<(&Bar, &Children)>) { for (foo, children) in &foo_query { for (bar, children) in bar_query.iter_many(children) { for (foo, children) in foo_query.iter_many(children) { // :D } } } } ``` ## Additional changes Implemented `IntoIterator` for `&Children` because why not. ## Todo - Bikeshed! Co-authored-by: deontologician Co-authored-by: devil-ira commit b47291264b4d2508e288b4ec122589d3aae7a0e5 Author: dataphract Date: Mon Jun 6 15:47:52 2022 +0000 diagnostics: meaningful error when graph node has wrong number of inputs (#4924) # Objective Currently, providing the wrong number of inputs to a render graph node triggers this assertion: ``` thread 'main' panicked at 'assertion failed: `(left == right)` left: `1`, right: `2`', /[redacted]/bevy/crates/bevy_render/src/renderer/graph_runner.rs:164:13 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace ``` This does not provide the user any context. ## Solution Add a new `RenderGraphRunnerError` variant to handle this case. The new message looks like this: ``` ERROR bevy_render::renderer: Error running render graph: ERROR bevy_render::renderer: > node (name: 'Some("outline_pass")') has 2 input slots, but was provided 1 values ``` --- ## Changelog ### Changed `RenderGraphRunnerError` now has a new variant, `MismatchedInputCount`. ## Migration Guide Exhaustive matches on `RenderGraphRunnerError` will need to add a branch to handle the new `MismatchedInputCount` variant. commit c4080c68323c396261ef130d2eece4d8e5732a75 Author: Yutao Yuan Date: Mon Jun 6 15:47:51 2022 +0000 Fix release workflow (#4903) # Objective While playing with the code, I found some problems in the recently merged version-bumping workflow: - Most importantly, now that we are using `0.8.0-dev` in development, the workflow will try to bump it to `0.9.0` :sob: - The crate filter is outdated now that we have more crates in `tools`. - We are using `bevy@users.noreply.github.com`, but according to [Github help](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-email-preferences/setting-your-commit-email-address#about-commit-email-addresses), that email address means "old no-reply email format for the user `bevy`". It is currently not associated with any account, but I feel this is still not appropriate here. ## Solution - Create a new workflow, `Post-release version bump`, that should be run after a release and bumps version from `0.X.0` to `0.X+1.0-dev`. Unfortunately, cargo-release doesn't have a builtin way to do this, so we need to parse and increment the version manually. - Add the new crates in `tools` to exclusion list. Also removes the dependency version specifier from `bevy_ecs_compile_fail_tests`. It is not in the workspace so the dependency version will not get automatically updated by cargo-release. - Change the author email to `41898282+github-actions[bot]@users.noreply.github.com`. According to the discussion [here](https://github.com/actions/checkout/issues/13#issuecomment-724415212) and [here](https://github.community/t/github-actions-bot-email-address/17204/6), this is the email address associated with the github-actions bot account. - Also add the workflows to our release checklist. See infmagic2047#5 and infmagic2047#6 for examples of release and post-release PRs. 
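For the post-release bump itself, the rule is simply `0.X.0` to `0.X+1.0-dev`; the snippet below only illustrates that rule and is not the code the workflow runs (the real workflow drives cargo-release and edits the manifests directly):
```rust
// Illustrative only: the post-release rule described above, `0.X.0` -> `0.X+1.0-dev`.
fn post_release_bump(version: &str) -> Option<String> {
    let mut parts = version.split('.');
    let major: u32 = parts.next()?.parse().ok()?;
    let minor: u32 = parts.next()?.parse().ok()?;
    // Only plain `X.Y.0` release versions are expected as input here.
    if parts.next()? != "0" || parts.next().is_some() {
        return None;
    }
    Some(format!("{major}.{}.0-dev", minor + 1))
}

fn main() {
    assert_eq!(post_release_bump("0.8.0").as_deref(), Some("0.9.0-dev"));
}
```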
commit 85cd0eb445b6fbad6bcff86a37ccb0e59cf948ea Author: TheRawMeatball Date: Mon Jun 6 14:46:41 2022 +0000 Add ParallelCommands system parameter (#4749) (follow-up to #4423) # Objective Currently, it isn't possible to easily fire commands from within par_for_each blocks. This PR allows for issuing commands from within parallel scopes. commit 2f5a1c6e1699cae7791f8122598116e233894a72 Author: Yoshiera Date: Mon Jun 6 14:24:41 2022 +0000 remove redundant query parameters (#4945) # Objective In the `queue_custom` system in `shader_instancing` example, the query of `material_meshes` has a redundant `With>` query filter because `Handle` is included in the component access. ## Solution Remove the `With>` filter commit 765bd46c2eb853a6921c4de47c065e4850ede5fe Author: Thierry Berger Date: Mon Jun 6 00:06:49 2022 +0000 add a post-processing example (#4797) # Objective - Add an example showing a custom post processing effect, done after the first rendering pass. ## Solution - A simple post processing "chromatic aberration" effect. I mixed together examples `3d/render_to_texture`, and `shader/shader_material_screenspace_texture` - Reading a bit how https://github.com/bevyengine/bevy/pull/3430 was done gave me pointers to apply the main pass to the 2d render rather than using a 3d quad. This work might be or not be relevant to https://github.com/bevyengine/bevy/issues/2724
⚠️ Click for a video of the render ⚠️ I’ve been told it might hurt the eyes 👀 , maybe we should choose another effect just in case ? https://user-images.githubusercontent.com/2290685/169138830-a6dc8a9f-8798-44b9-8d9e-449e60614916.mp4
# Request for feedbacks - [ ] Is chromatic aberration effect ok ? (Correct term, not a danger for the eyes ?) I'm open to suggestion to make something different. - [ ] Is the code idiomatic ? I preferred a "main camera -> **new camera with post processing applied to a quad**" approach to emulate minimum modification to existing code wanting to add global post processing. --- ## Changelog - Add a full screen post processing shader example commit 5e2cfb2f19e0e2287fd48d55df92f78617acf831 Author: Carter Anderson Date: Sun Jun 5 00:27:49 2022 +0000 Camera Driven Viewports (#4898) # Objective Users should be able to render cameras to specific areas of a render target, which enables scenarios like split screen, minimaps, etc. Builds on the new Camera Driven Rendering added here: #4745 Fixes: #202 Alternative to #1389 and #3626 (which are incompatible with the new Camera Driven Rendering) ## Solution ![image](https://user-images.githubusercontent.com/2694663/171560044-f0694f67-0cd9-4598-83e2-a9658c4fed57.png) Cameras can now configure an optional "viewport", which defines a rectangle within their render target to draw to. If a `Viewport` is defined, the camera's `CameraProjection`, `View`, and visibility calculations will use the viewport configuration instead of the full render target. ```rust // This camera will render to the first half of the primary window (on the left side). commands.spawn_bundle(Camera3dBundle { camera: Camera { viewport: Some(Viewport { physical_position: UVec2::new(0, 0), physical_size: UVec2::new(window.physical_width() / 2, window.physical_height()), depth: 0.0..1.0, }), ..default() }, ..default() }); ``` To account for this, the `Camera` component has received a few adjustments: * `Camera` now has some new getter functions: * `logical_viewport_size`, `physical_viewport_size`, `logical_target_size`, `physical_target_size`, `projection_matrix` * All computed camera values are now private and live on the `ComputedCameraValues` field (logical/physical width/height, the projection matrix). They are now exposed on `Camera` via getters/setters This wasn't _needed_ for viewports, but it was long overdue. --- ## Changelog ### Added * `Camera` components now have a `viewport` field, which can be set to draw to a portion of a render target instead of the full target. * `Camera` component has some new functions: `logical_viewport_size`, `physical_viewport_size`, `logical_target_size`, `physical_target_size`, and `projection_matrix` * Added a new split_screen example illustrating how to render two cameras to the same scene ## Migration Guide `Camera::projection_matrix` is no longer a public field. Use the new `Camera::projection_matrix()` method instead: ```rust // Bevy 0.7 let projection = camera.projection_matrix; // Bevy 0.8 let projection = camera.projection_matrix(); ``` commit 8e08e26c253192e9d6f23cfbb8b140556bf0c659 Author: Henry Sloan Date: Sat Jun 4 20:00:01 2022 +0000 Update commented vsync code in example to use present_mode (#4926) # Objective - To fix the broken commented code in `examples/shader/compute_shader_game_of_life.rs` for disabling frame throttling ## Solution - Change the commented code from using the old `WindowDescriptor::vsync` to the new `WindowDescriptor::present_mode` ### Note I chose to use the fully qualified scope `bevy::window::PresentWindow::Immediate` rather than explicitly including `PresentWindow` to avoid an unused import when the code is commented. 
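In context, the commented hint amounts to something like the following (sketched against the `WindowDescriptor` API of the time, not copied verbatim from the example):
```rust
use bevy::prelude::*;
use bevy::window::PresentMode;

fn main() {
    App::new()
        // The example keeps this resource commented out; enabling it disables
        // frame throttling, replacing the old `vsync: false` toggle.
        .insert_resource(WindowDescriptor {
            present_mode: PresentMode::Immediate,
            ..default()
        })
        .add_plugins(DefaultPlugins)
        .run();
}
```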
commit 3a9383f9977bf16461565633426777a62cb51356 Author: Alice Cecile Date: Sat Jun 4 14:30:44 2022 +0000 Revert ndk-glue to 0.5 to synchronize with winit (#4916) # Objective - Upgrading ndk-glue (our Android interop layer) desynchronized us from winit - This further broke Android builds, see #4905 (oops...) - Reverting to 0.5 should help with this, until the new `winit` version releases - Fixes #4774 and closes #4529 commit 1fcb7d0c2e7086f8c3162b8ea5f9db3ff5f829c9 Author: Matthias Deiml Date: Fri Jun 3 20:50:50 2022 +0000 Set naga capabilities corresponding to wgpu features (#4824) # Objective At the moment all extra capabilities are disabled when validating shaders with naga: https://github.com/bevyengine/bevy/blob/c7c08f95cb784afc366eb2dcedd21d9d40e72d32/crates/bevy_render/src/render_resource/shader.rs#L146-L149 This means these features can't be used even if the corresponding wgpu features are active. ## Solution With these changes capabilities are now set corresponding to `RenderDevice::features`. --- I have validated these changes for push constants with a project I am currently working on. Though bevy does not support creating pipelines with push constants yet, so I was only able to see that shaders are validated and compiled as expected. commit f0218b9b2b41eec0c48cb7e41294cd397d95f385 Author: Christopher Durham Date: Fri Jun 3 20:28:44 2022 +0000 Move primitive type registration into bevy_reflect (#4844) # Objective - Users of bevy_reflect probably always want primitive types registered. ## Solution - Register them by default. --- This is a minor incremental change along the path of [removing catch-all functionality from bevy_core](https://github.com/bevyengine/bevy/issues/2931). commit 9976ecb8107b9f289ea95f9d46708b1179996659 Author: Alex Saveau Date: Thu Jun 2 19:42:20 2022 +0000 Fix crash when using Duration::MAX (#4900) # Objective If you set the `ReactiveLowPower` max wait to `Duration::MAX`, stuff panics. Fix that. ## Solution Wait forever if addition failed. commit f487407e07c15af878e0d6886f9cd4c146f1f94f Author: Carter Anderson Date: Thu Jun 2 00:12:17 2022 +0000 Camera Driven Rendering (#4745) This adds "high level camera driven rendering" to Bevy. The goal is to give users more control over what gets rendered (and where) without needing to deal with render logic. This will make scenarios like "render to texture", "multiple windows", "split screen", "2d on 3d", "3d on 2d", "pass layering", and more significantly easier. Here is an [example of a 2d render sandwiched between two 3d renders (each from a different perspective)](https://gist.github.com/cart/4fe56874b2e53bc5594a182fc76f4915): ![image](https://user-images.githubusercontent.com/2694663/168411086-af13dec8-0093-4a84-bdd4-d4362d850ffa.png) Users can now spawn a camera, point it at a RenderTarget (a texture or a window), and it will "just work". Rendering to a second window is as simple as spawning a second camera and assigning it to a specific window id: ```rust // main camera (main window) commands.spawn_bundle(Camera2dBundle::default()); // second camera (other window) commands.spawn_bundle(Camera2dBundle { camera: Camera { target: RenderTarget::Window(window_id), ..default() }, ..default() }); ``` Rendering to a texture is as simple as pointing the camera at a texture: ```rust commands.spawn_bundle(Camera2dBundle { camera: Camera { target: RenderTarget::Texture(image_handle), ..default() }, ..default() }); ``` Cameras now have a "render priority", which controls the order they are drawn in. 
If you want to use a camera's output texture as a texture in the main pass, just set the priority to a number lower than the main pass camera (which defaults to `0`). ```rust // main pass camera with a default priority of 0 commands.spawn_bundle(Camera2dBundle::default()); commands.spawn_bundle(Camera2dBundle { camera: Camera { target: RenderTarget::Texture(image_handle.clone()), priority: -1, ..default() }, ..default() }); commands.spawn_bundle(SpriteBundle { texture: image_handle, ..default() }) ``` Priority can also be used to layer two cameras on top of each other for the same RenderTarget. This is what "2d on top of 3d" looks like in the new system: ```rust commands.spawn_bundle(Camera3dBundle::default()); commands.spawn_bundle(Camera2dBundle { camera: Camera { // this will render 2d entities "on top" of the default 3d camera's render priority: 1, ..default() }, ..default() }); ``` There is no longer the concept of a global "active camera". Resources like `ActiveCamera<Camera2d>` and `ActiveCamera<Camera3d>` have been replaced with the camera-specific `Camera::is_active` field. This does put the onus on users to manage which cameras should be active. Cameras are now assigned a single render graph as an "entry point", which is configured on each camera entity using the new `CameraRenderGraph` component. The old `PerspectiveCameraBundle` and `OrthographicCameraBundle` (generic on camera marker components like Camera2d and Camera3d) have been replaced by `Camera3dBundle` and `Camera2dBundle`, which set 3d and 2d default values for the `CameraRenderGraph` and projections. ```rust // old 3d perspective camera commands.spawn_bundle(PerspectiveCameraBundle::default()) // new 3d perspective camera commands.spawn_bundle(Camera3dBundle::default()) ``` ```rust // old 2d orthographic camera commands.spawn_bundle(OrthographicCameraBundle::new_2d()) // new 2d orthographic camera commands.spawn_bundle(Camera2dBundle::default()) ``` ```rust // old 3d orthographic camera commands.spawn_bundle(OrthographicCameraBundle::new_3d()) // new 3d orthographic camera commands.spawn_bundle(Camera3dBundle { projection: OrthographicProjection { scale: 3.0, scaling_mode: ScalingMode::FixedVertical, ..default() }.into(), ..default() }) ``` Note that `Camera3dBundle` now uses a new `Projection` enum instead of hard coding the projection into the type. There are a number of motivators for this change: the render graph is now a part of the bundle, the way "generic bundles" work in the rust type system prevents nice `..default()` syntax, and changing projections at runtime is much easier with an enum (ex for editor scenarios). I'm open to discussing this choice, but I'm relatively certain we will all come to the same conclusion here. Camera2dBundle and Camera3dBundle are much clearer than being generic on marker components / using non-default constructors. If you want to run a custom render graph on a camera, just set the `CameraRenderGraph` component: ```rust commands.spawn_bundle(Camera3dBundle { camera_render_graph: CameraRenderGraph::new(some_render_graph_name), ..default() }) ``` Just note that if the graph requires data from specific components to work (such as `Camera3d` config, which is provided in the `Camera3dBundle`), make sure the relevant components have been added.
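Since there is no global active camera any more, managing `Camera::is_active` is left to user code. A minimal sketch of what that can look like (the `MinimapCamera` marker and the key binding are made up for illustration):

```rust
use bevy::prelude::*;

// Hypothetical marker for a secondary camera we want to toggle.
#[derive(Component)]
struct MinimapCamera;

// Flip the secondary camera on/off when Space is pressed.
fn toggle_minimap(
    keys: Res<Input<KeyCode>>,
    mut cameras: Query<&mut Camera, With<MinimapCamera>>,
) {
    if keys.just_pressed(KeyCode::Space) {
        for mut camera in cameras.iter_mut() {
            camera.is_active = !camera.is_active;
        }
    }
}
```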
Speaking of using components to configure graphs / passes, there are a number of new configuration options: ```rust commands.spawn_bundle(Camera3dBundle { camera_3d: Camera3d { // overrides the default global clear color clear_color: ClearColorConfig::Custom(Color::RED), ..default() }, ..default() }) commands.spawn_bundle(Camera3dBundle { camera_3d: Camera3d { // disables clearing clear_color: ClearColorConfig::None, ..default() }, ..default() }) ``` Expect to see more of the "graph configuration Components on Cameras" pattern in the future. By popular demand, UI no longer requires a dedicated camera. `UiCameraBundle` has been removed. `Camera2dBundle` and `Camera3dBundle` now both default to rendering UI as part of their own render graphs. To disable UI rendering for a camera, disable it using the CameraUi component: ```rust commands .spawn_bundle(Camera3dBundle::default()) .insert(CameraUi { is_enabled: false, ..default() }) ``` ## Other Changes * The separate clear pass has been removed. We should revisit this for things like sky rendering, but I think this PR should "keep it simple" until we're ready to properly support that (for code complexity and performance reasons). We can come up with the right design for a modular clear pass in a followup pr. * I reorganized bevy_core_pipeline into Core2dPlugin and Core3dPlugin (and core_2d / core_3d modules). Everything is pretty much the same as before, just logically separate. I've moved relevant types (like Camera2d, Camera3d, Camera3dBundle, Camera2dBundle) into their relevant modules, which is what motivated this reorganization. * I adapted the `scene_viewer` example (which relied on the ActiveCameras behavior) to the new system. I also refactored bits and pieces to be a bit simpler. * All of the examples have been ported to the new camera approach. `render_to_texture` and `multiple_windows` are now _much_ simpler. I removed `two_passes` because it is less relevant with the new approach. If someone wants to add a new "layered custom pass with CameraRenderGraph" example, that might fill a similar niche. But I don't feel much pressure to add that in this pr. * Cameras now have `target_logical_size` and `target_physical_size` fields, which makes finding the size of a camera's render target _much_ simpler. As a result, the `Assets` and `Windows` parameters were removed from `Camera::world_to_screen`, making that operation much more ergonomic. * Render order ambiguities between cameras with the same target and the same priority now produce a warning. This accomplishes two goals: 1. Now that there is no "global" active camera, by default spawning two cameras will result in two renders (one covering the other). This would be a silent performance killer that would be hard to detect after the fact. By detecting ambiguities, we can provide a helpful warning when this occurs. 2. Render order ambiguities could result in unexpected / unpredictable render results. Resolving them makes sense. ## Follow Up Work * Per-Camera viewports, which will make it possible to render to a smaller area inside of a RenderTarget (great for something like splitscreen) * Camera-specific MSAA config (should use the same "overriding" pattern used for ClearColor) * Graph Based Camera Ordering: priorities are simple, but they make complicated ordering constraints harder to express. We should consider adopting a "graph based" camera ordering model with "before" and "after" relationships to other cameras (or build it "on top" of the priority system). 
* Consider allowing graphs to run subgraphs from any nest level (aka a global namespace for graphs). Right now the 2d and 3d graphs each need their own UI subgraph, which feels "fine" in the short term. But being able to share subgraphs between other subgraphs seems valuable. * Consider splitting `bevy_core_pipeline` into `bevy_core_2d` and `bevy_core_3d` packages. Theres a shared "clear color" dependency here, which would need a new home. commit f2b53de4aa5ba10aa052d0ce1378e2395798ca21 Author: François Date: Wed Jun 1 23:05:30 2022 +0000 Do not bundle the assets from wasm example in the crate (#4895) # Objective - Fix #4881 ## Solution - Do not bundle the assets from wasm example in the crate; tested with `cargo package` to check the produced crate commit 5a1866c13dca42f5230d91564809692760acb14a Author: François Date: Wed Jun 1 22:31:24 2022 +0000 Bevy release train - add a workflow to manually create a PR updating Bevy version (#3283) # Objective - Ensure future Bevy releases happens smoothly ## Solution - Add a workflow that will open a PR updating all Bevy crate that can be created manually example PR opened: https://github.com/mockersf/bevy/pull/62 The day from this PR does not need to be the release day, it will just open the PR to prepare it. Later if we feel confident, it could push automatically to crates.io. how to trigger the workflow: https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow commit a16ffe6239ed61e6e8df7149706086ebb6b6d7c0 Author: François Date: Wed Jun 1 20:44:30 2022 +0000 create font atlas that can contains fonts of any size (#3592) # Objective - Bevy currently panics when displaying text with a *very* big font size (with font size greater than 400, the glyph would have a width or height greater than 512) ``` thread 'main' panicked at 'Fatal error when processing text: failed to add glyph to newly-created atlas GlyphId(514).', crates/bevy_ui/src/widget/text.rs:118:21 ``` ## Solution - Create font atlas that scales up with the size of the glyphs commit cc4062ec4359b80a8c88a63aa45f17c681bb0e71 Author: Robert Swain Date: Tue May 31 23:23:25 2022 +0000 Split mesh shader files (#4867) # Objective - Split PBR and 2D mesh shaders into types and bindings to prepare the shaders to be more reusable. - See #3969 for details. I'm doing this in multiple steps to make review easier. --- ## Changelog - Changed: 2D and PBR mesh shaders are now split into types and bindings, the following shader imports are available: `bevy_pbr::mesh_view_types`, `bevy_pbr::mesh_view_bindings`, `bevy_pbr::mesh_types`, `bevy_pbr::mesh_bindings`, `bevy_sprite::mesh2d_view_types`, `bevy_sprite::mesh2d_view_bindings`, `bevy_sprite::mesh2d_types`, `bevy_sprite::mesh2d_bindings` ## Migration Guide - In shaders for 3D meshes: - `#import bevy_pbr::mesh_view_bind_group` -> `#import bevy_pbr::mesh_view_bindings` - `#import bevy_pbr::mesh_struct` -> `#import bevy_pbr::mesh_types` - NOTE: If you are using the mesh bind group at bind group index 2, you can remove those binding statements in your shader and just use `#import bevy_pbr::mesh_bindings` which itself imports the mesh types needed for the bindings. 
- In shaders for 2D meshes: - `#import bevy_sprite::mesh2d_view_bind_group` -> `#import bevy_sprite::mesh2d_view_bindings` - `#import bevy_sprite::mesh2d_struct` -> `#import bevy_sprite::mesh2d_types` - NOTE: If you are using the mesh2d bind group at bind group index 2, you can remove those binding statements in your shader and just use `#import bevy_sprite::mesh2d_bindings` which itself imports the mesh2d types needed for the bindings. commit bdef86ea6eae96383d7bbd4818797f7572572090 Author: Robert Swain Date: Tue May 31 22:53:54 2022 +0000 Generate vertex tangents using mikktspace (#3872) # Objective Models can be produced that do not have vertex tangents but do have normal map textures. The tangents can be generated. There is a way that the vertex tangents can be generated to be exactly invertible to avoid introducing error when recreating the normals in the fragment shader. ## Solution - After attempts to get https://github.com/gltf-rs/mikktspace to integrate simple glam changes and version bumps, and releases of that crate taking weeks / not being made (no offense intended to the authors/maintainers, bevy just has its own timelines and needs to take care of) it was decided to fork that repository. The following steps were taken: - mikktspace was forked to https://github.com/bevyengine/mikktspace in order to preserve the repository's history in case the original is ever taken down - The README in that repo was edited to add a note stating from where the repository was forked and explaining why - The repo was locked for changes as its only purpose is historical - The repo was integrated into the bevy repo using `git subtree add --prefix crates/bevy_mikktspace git@github.com:bevyengine/mikktspace.git master` - In `bevy_mikktspace`: - The travis configuration was removed - `cargo fmt` was run - The `Cargo.toml` was conformed to bevy's (just adding bevy to the keywords, changing the homepage and repository, changing the version to 0.7.0-dev - importantly the license is exactly the same) - Remove the features, remove `nalgebra` entirely, only use `glam`, suppress clippy. - This was necessary because our CI runs clippy with `--all-features` and the `nalgebra` and `glam` features are mutually exclusive, plus I don't want to modify this highly numerically-sensitive code just to appease clippy and diverge even more from upstream. - Rebase https://github.com/bevyengine/bevy/pull/1795 - @jakobhellermann said it was fine to copy and paste but it ended up being almost exactly the same with just a couple of adjustments when validating correctness so I decided to actually rebase it and then build on top of it. - Use the exact same fragment shader code to ensure correct normal mapping. - Tested with both https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/NormalTangentMirrorTest which has vertex tangents and https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/NormalTangentTest which requires vertex tangent generation Co-authored-by: alteous commit 27c321e33f1e7420b909b1a4697de4c0aee6d06b Author: François Date: Tue May 31 17:30:31 2022 +0000 run examples on windows (#4437) # Objective - ~~Running examples on Linux in CI timeout~~Linux is back! - But hey we can run examples on windows too! 
## Solution - Run examples on windows daily - I also added a 30 minute timeout so that when it explodes, it doesn't explode in 6 hours (the default timeout) - And simplified the linux examples by not requiring a custom feature set commit ae0ccfb4f6d41d2033be03b48e2538d1cf0cd86f Author: Daniikk1012 Date: Tue May 31 17:14:12 2022 +0000 Make ScalingMode more flexible (#3253) Adds ability to specify scaling factor for `WindowSize`, size of the fixed axis for `FixedVertical` and `FixedHorizontal` and a new `ScalingMode` that is a mix of `FixedVertical` and `FixedHorizontal` # The issue Currently, the only available options are to: * Have one of the axes fixed to value 1 * Have viewport size match the window size * Manually adjust viewport size In most games these options are not enough and more advanced scaling methods have to be used ## Solution The solution is to provide additional parameters to current scaling modes, like scaling factor for `WindowSize`. Additionally, a more advanced `Auto` mode is added, which dynamically switches between behaving like `FixedVertical` and `FixedHorizontal` depending on the window's aspect ratio. Co-authored-by: Daniikk1012 <49123959+Daniikk1012@users.noreply.github.com> commit caef967d149abad9410bbc6a10654383094272ae Author: Alex Saveau Date: Tue May 31 15:54:38 2022 +0000 Derive default on ReportExecutionOrderAmbiguities (#4873) commit cea23b99692ab74a9ac6280df8e9d39bd69b7eb4 Author: Chris Dawkins Date: Tue May 31 15:37:23 2022 +0000 Update "C-Bug" label and url in CONTRIBUTING.md (#4880) 'bug' is not a valid label. Changed it to "C-Bug". Co-authored-by: siph commit ee4bcbea3c296bb69c680bf7361ee4b234c100fb Author: robtfm <50659922+robtfm@users.noreply.github.com> Date: Tue May 31 02:02:49 2022 +0000 add depth_bias to SpecializedMaterial (#4101) # Objective allow meshes with equal z-depth to be rendered in a chosen order / avoid z-fighting ## Solution add a depth_bias to SpecializedMaterial that is added to the mesh depth used for render-ordering. commit f000c2b951f4c519416ffda70281b2284d37f9f8 Author: Félix Lescaudey de Maneville Date: Tue May 31 01:38:07 2022 +0000 Clippy improvements (#4665) # Objective Follow up to my previous MR #3718 to add new clippy warnings to bevy: - [x] [~~option_if_let_else~~](https://rust-lang.github.io/rust-clippy/master/#option_if_let_else) (reverted) - [x] [redundant_else](https://rust-lang.github.io/rust-clippy/master/#redundant_else) - [x] [match_same_arms](https://rust-lang.github.io/rust-clippy/master/#match_same_arms) - [x] [semicolon_if_nothing_returned](https://rust-lang.github.io/rust-clippy/master/#semicolon_if_nothing_returned) - [x] [explicit_iter_loop](https://rust-lang.github.io/rust-clippy/master/#explicit_iter_loop) - [x] [map_flatten](https://rust-lang.github.io/rust-clippy/master/#map_flatten) There is one commit per clippy warning, and the matching flags are added to the CI execution. To test the CI execution you may run `cargo run -p ci -- clippy` at the root. I chose to add the flags in the `ci` tool crate to avoid having them in every `lib.rs`, but I guess it could become an issue with surprise warnings coming up after a commit/push Co-authored-by: Carter Anderson commit e543941fb9ffed652f99f96301e28d3d7f6e3e6f Author: Giacomo Stevanato Date: Mon May 30 22:45:09 2022 +0000 Improve soundness of `CommandQueue` (#4863) # Objective This PR aims to improve the soundness of `CommandQueue`.
In particular it aims to: - make it sound to store commands that contain padding or uninitialized bytes; - avoid uses of commands after moving them in the queue's buffer (`std::mem::forget` is technically a use of its argument); - remove useless checks: `self.bytes.as_mut_ptr().is_null()` is always `false` because even `Vec`s that haven't allocated use a dangling pointer. Moreover the same pointer was used to write the command, so it ought to be valid for reads if it was for writes. ## Solution - To soundly store padding or uninitialized bytes `CommandQueue` was changed to contain a `Vec<MaybeUninit<u8>>` instead of `Vec<u8>`; - To avoid uses of the command through `std::mem::forget`, `ManuallyDrop` was used. ## Other observations While writing this PR I noticed that `CommandQueue` doesn't seem to drop the commands that weren't applied. While this is a pretty niche case (you would have to be manually using `CommandQueue`/`std::mem::swap`ping one), I wonder if it should be documented anyway. commit 48289984ea869b86fbeca2187720ec0a4a597992 Author: Niklas Eicker Date: Mon May 30 22:28:32 2022 +0000 Add license files to all published crates (#4828) # Objective Add our licenses to every published crate Fixes #4719 ## Solution - Copy licenses to every crate before publishing commit 53bcecbbfc3218991218c37536607da620bd61ee Author: Alice Cecile Date: Mon May 30 22:11:44 2022 +0000 Make bug template more beginner-friendly (#4652) # Objective 1. "What you expected to happen" and "what actually happened" often involves trivial duplication. 2. "Please provide full reproduction steps" is not helpful advice to new contributors. 3. The OS field was commonly useless or inadequate. 4. The description for "additional information" effectively just repeated the title of the field. ## Solution 1. Unify these fields into a single "what went wrong" field. 2. Provide an example of a useful reproduction. 3. Replace OS field with an optional "Setup Information" field that captures information about other critical setup like Rust version and hardware. 4. Provide helpful advice about what sort of information may be useful to add. commit ef032040dd961177337390326c1660cf9120f792 Author: ira Date: Mon May 30 21:16:48 2022 +0000 Cargo `--timings` option has been stabilized. Update profiling.md. (#4850) As of https://github.com/rust-lang/cargo/pull/10245 `--timings` has been stabilized. Update profiling.md to reflect this. Co-authored-by: devil-ira commit d313ba59bdf750375eec0464dd30b2d111dc817f Author: James Liu Date: Mon May 30 21:16:47 2022 +0000 Don't allocate for ComponentDescriptors of non-dynamic component types (#4725) # Objective Don't allocate memory for Component types known at compile-time. Save a bit of memory. ## Solution Change `ComponentDescriptor::name` from `String` to `Cow<'static, str>` to use the `&'static str` returned by `std::any::type_name`. commit c174945208080e8c9b6755957c5009140fa7c670 Author: James Liu Date: Mon May 30 20:57:33 2022 +0000 Fix release builds: Move asserts under #[cfg(debug_assertions)] (#4871) # Objective `debug_assert!` macros must still compile properly in release mode due to how they're implemented. This is causing release builds to fail. ## Solution Change them to `assert!` macros inside `#[cfg(debug_assertions)]` blocks.
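A generic sketch of that pattern (not the actual bevy_ecs code): with `debug_assert!` the asserted expression still has to compile in release builds, whereas an `assert!` behind `#[cfg(debug_assertions)]` is compiled out entirely.

```rust
// Checks an index, but only in debug builds.
fn debug_check(index: usize, len: usize) {
    // `debug_assert!(index < len)` would still need to type-check in release mode.
    // A plain `assert!` behind `cfg(debug_assertions)` disappears from release builds.
    #[cfg(debug_assertions)]
    assert!(index < len, "index {index} out of bounds (len {len})");

    // Avoid unused-parameter warnings when the assert is compiled out.
    #[cfg(not(debug_assertions))]
    let _ = (index, len);
}

fn main() {
    debug_check(2, 4);
}
```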
commit 4b7f904cfc97ca901d649abe801e5a1e681cb040 Author: Jakob Hellermann Date: Mon May 30 20:22:57 2022 +0000 remove `Serialize` impl for dyn Array and friends (#4780) # Objective `bevy_reflect` has different kinds of reflected types (each with their own trait), `trait Struct: Reflect`, `trait List: Reflect`, `trait Map: Reflect`, ... Types that don't fit any of those are called reflect value types, they are opaque and can't be deconstructed further. `bevy_reflect` can serialize `dyn Reflect` values. Any container types (struct, list, map) get deconstructed and their elements serialized separately, which can all happen without serde being involved ever (happens [here](https://github.com/bevyengine/bevy/blob/main/crates/bevy_reflect/src/serde/ser.rs#L50-L85=)). The only point at which we require types to be serde-serializable is for *value types* (happens [here](https://github.com/bevyengine/bevy/blob/main/crates/bevy_reflect/src/serde/ser.rs#L104=)). So reflect array serializing is solved, since arrays are container types which don't require serde. #1213 also added the `serialize` method and `Serialize` impls for `dyn Array` and `DynamicArray` which use their element's `Reflect::serializable` function. This is 1. unnecessary, because it is not used for array serialization, and 2. annoying for removing the `Serialize` bound on container types, because these impls don't have access to the `TypeRegistry`, so we can't move the serialization code there. # Solution Remove these impls and `fn serialize`. It's not used and annoying for other changes. commit 1e8ca45aebd1522f8fa5d93358f266687b5b2861 Author: Robert Swain Date: Mon May 30 20:10:26 2022 +0000 scene_viewer: Make it possible to disable the animation feature (#4849) # Objective - The `scene_viewer` example assumes the `animation` feature is enabled, which it is by default. However, animations may have a performance cost that is undesirable when testing performance, for example. Then it is useful to be able to disable the `animation` feature and one would still like the `scene_viewer` example to work. ## Solution - Gate animation code in `scene_viewer` on the `animation` feature being enabled. commit 575ffa7c0c173d42efd37eaba573f02189b2804d Author: FraserLee Date: Mon May 30 19:58:16 2022 +0000 Added `offset` parameter to `TextureAtlas::from_grid_with_padding` (#4836) # Objective Increase compatibility with a fairly common format of padded spritesheets, in which half the padding value occurs before the first sprite box begins. The original behaviour falls out when `Vec2::ZERO` is used for `offset`. See below unity screenshot for an example of a spritesheet with padding ![Screen Shot 2022-05-24 at 4 11 49 PM](https://user-images.githubusercontent.com/30442265/170123682-287e5733-b69d-452b-b2e6-46d8d29293fb.png) ## Solution Tiny change to `crates/bevy_sprite/src/texture_atlas.rs` ## Migration Guide Calls to `TextureAtlas::from_grid_with_padding` should be modified to include a new parameter, which can be set to `Vec2::ZERO` to retain old behaviour.
```rust from_grid_with_padding(texture, tile_size, columns, rows, padding) -> from_grid_with_padding(texture, tile_size, columns, rows, padding, Vec2::ZERO) ``` Co-authored-by: FraserLee <30442265+FraserLee@users.noreply.github.com> commit fb813a3a64cdafdd75f7590a461364bcdf34bf2e Author: Gino Valente Date: Mon May 30 19:06:25 2022 +0000 bevy_reflect: Add `#[reflect(default)]` attribute for `FromReflect` (#4140) # Objective Currently, `FromReflect` makes a couple assumptions: * Ignored fields must implement `Default` * Active fields must implement `FromReflect` * The reflected object must be fully populated for active fields (can't use an empty `DynamicStruct`) However, one or more of these requirements might be unachievable, such as for external types. In these cases, it might be nice to tell `FromReflect` to use a custom default. ## Solution Added the `#[reflect(default)]` derive helper attribute. This attribute can be applied to any field (ignored or not) and will allow a default value to be specified in place of the regular `from_reflect()` call. It takes two forms: `#[reflect(default)]` and `#[reflect(default = "some_func")]`. The former specifies that `Default::default()` should be used while the latter specifies that `some_func()` should be used. This is pretty much [how serde does it](https://serde.rs/field-attrs.html#default). ### Example ```rust #[derive(Reflect, FromReflect)] struct MyStruct { // Use `Default::default()` #[reflect(default)] foo: String, // Use `get_bar_default()` #[reflect(default = "get_bar_default")] #[reflect(ignore)] bar: usize, } fn get_bar_default() -> usize { 123 } ``` ### Active Fields As an added benefit, this also allows active fields to be completely missing from their dynamic object. This is because the attribute tells `FromReflect` how to handle missing active fields (it still tries to use `from_reflect` first so the `FromReflect` trait is still required). ```rust let dyn_struct = DynamicStruct::default(); // We can do this without actually including the active fields since they have `#[reflect(default)]` let my_struct = <MyStruct as FromReflect>::from_reflect(&dyn_struct); ``` ### Container Defaults Also, with the addition of #3733, people will likely start adding `#[reflect(Default)]` to their types now. Just like with the fields, we can use this to mark the entire container as "defaultable". This grants us the ability to completely remove the field markers altogether if our type implements `Default` (and we're okay with fields using that instead of their own `Default` impls): ```rust #[derive(Reflect, FromReflect)] #[reflect(Default)] struct MyStruct { foo: String, #[reflect(ignore)] bar: usize, } impl Default for MyStruct { fn default() -> Self { Self { foo: String::from("Hello"), bar: 123, } } } // Again, we can now construct this from nothing pretty much let dyn_struct = DynamicStruct::default(); let my_struct = <MyStruct as FromReflect>::from_reflect(&dyn_struct); ``` Now if _any_ field is missing when using `FromReflect`, we simply fall back onto the container's `Default` implementation. This behavior can be completely overridden on a per-field basis, of course, by simply defining those same field attributes like before.
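A rough sketch of that per-field override combined with a container-level default, assuming the attributes behave as described above (the `Settings` type and `default_name` function are invented for illustration):

```rust
use bevy_reflect::{DynamicStruct, FromReflect, Reflect};

#[derive(Reflect, FromReflect)]
#[reflect(Default)]
struct Settings {
    // Missing from the dynamic value? Falls back to `Settings::default().volume`.
    volume: f32,
    // Missing? Uses `default_name()` instead of the container's `Default` impl.
    #[reflect(default = "default_name")]
    name: String,
}

fn default_name() -> String {
    "player".to_string()
}

impl Default for Settings {
    fn default() -> Self {
        Self {
            volume: 0.5,
            name: String::new(),
        }
    }
}

fn main() {
    // An empty dynamic struct: both fields are "missing".
    let dyn_struct = DynamicStruct::default();
    let settings = <Settings as FromReflect>::from_reflect(&dyn_struct).unwrap();
    // `name` used `default_name()`, `volume` fell back to `Settings::default()`.
    assert_eq!(settings.name, "player");
    assert_eq!(settings.volume, 0.5);
}
```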
### Related * #3733 * #1395 * #2377 --- ## Changelog * Added `#[reflect(default)]` field attribute for `FromReflect` * Allows missing fields to be given a default value when using `FromReflect` * `#[reflect(default)]` - Use the field's `Default` implementation * `#[reflect(default = "some_fn")]` - Use a custom function to get the default value * Allow `#[reflect(Default)]` to have a secondary usage as a container attribute * Allows missing fields to be given a default value based on the container's `Default` impl when using `FromReflect` Co-authored-by: Gino Valente <49806985+MrGVSV@users.noreply.github.com> commit a0a3d8798bd40e1214e9fbca4a4c189bc6375058 Author: Robert Swain Date: Mon May 30 18:36:03 2022 +0000 ExtractResourcePlugin (#3745) # Objective - Add an `ExtractResourcePlugin` for convenience and consistency ## Solution - Add an `ExtractResourcePlugin` similar to `ExtractComponentPlugin` but for ECS `Resource`s. The system that is executed simply clones the main world resource into a render world resource, if and only if the main world resource was either added or changed since the last execution of the system. - Add an `ExtractResource` trait with a `fn extract_resource(res: &Self) -> Self` function. This is used by the `ExtractResourcePlugin` to extract the resource - Add a derive macro for `ExtractResource` on a `Resource` with the `Clone` trait, that simply returns `res.clone()` - Use `ExtractResourcePlugin` wherever both possible and appropriate commit ba53a44956560fd7e6c14c05ddcb9925522fc80c Author: Thomas Hansen <31554741+thomas992@users.noreply.github.com> Date: Mon May 30 18:13:36 2022 +0000 Add Clear Linux OS dependencies (#4852) commit aa183ef31a48ceed59134427e0f904e0282756c5 Author: Johan Klokkhammer Helsing Date: Mon May 30 18:13:35 2022 +0000 Derive thiserror::Error for HexColorError (again) (#4847) This was first done in 7b4e3a5, but was then reverted when the new renderer for 0.6 was merged (ffecb05). I'm assuming it was simply a mistake when merging. # Objective - Same as #2740, I think it was reverted by mistake when merging. > # Objective > > - Make it easy to use HexColorError with `thiserror`, i.e. converting it into other error types. > > Makes this possible: > > ```rust > #[derive(Debug, thiserror::Error)] > pub enum LdtkError { > #[error("An error occured while deserializing")] > Json(#[from] serde_json::Error), > #[error("An error occured while parsing a color")] > HexColor(#[from] bevy::render::color::HexColorError), > } > ``` > > ## Solution > > - Derive thiserror::Error the same way we do elsewhere (see query.rs for instance) commit 80b08ea45dadafd15442eea5d2e508c20a2187b7 Author: Daniel McNab <36049421+DJMcNab@users.noreply.github.com> Date: Mon May 30 17:59:20 2022 +0000 Allow higher order systems (#4833) # Objective - Higher order system could not be created by users. - However, a simple change to `SystemParamFunction` allows this. - Higher order systems in this case mean functions which return systems created using other systems, such as `chain` (which is basically equivalent to map) ## Solution - Change `SystemParamFunction` to be a safe abstraction over `FnMut([In,] ...params)->Out`. - Note that I believe `SystemParamFunction` should not have been counted as part of our public api before this PR. - This is because its only use was an unsafe function without an actionable safety comment. - The safety comment was basically 'call this within bevy code'. - I also believe that there are no external users in its current form. 
- A quick search on Google and in the discord confirmed this. ## See also - https://github.com/bevyengine/bevy/pull/4666, which uses this and subsumes the example here --- ## Changelog ### Added - `SystemParamFunction`, which can be used to create higher order systems. commit c46691c04a67c77b0ed2a3f98e9f8bdcdebf7cf6 Author: James Liu Date: Mon May 30 17:26:23 2022 +0000 Update gilrs to v0.9 (#4848) # Objective Fixes #4353. Fixes #4431. Picks up fixes for a panic for `gilrs` when `getGamepads()` is not available. ## Solution Update the `gilrs` to `v0.9.0`. Changelog can be seen here: https://gitlab.com/gilrs-project/gilrs/-/commit/dba36f91869c823fb66f8633d2b650b430682f80 EDIT: Updated `uuid` to 1.1 to avoid duplicate dependencies. Added `nix`'s two dependencies as exceptions until `rodio` updates their deps. commit a6eb3fa6d68bb4a70c9c50927b41a70b894a7c50 Author: Herbert "TheBracket Date: Mon May 30 16:59:45 2022 +0000 Apply vertex colors to ColorMaterial and Mesh2D (#4812) # Objective - Add Vertex Color support to 2D meshes and ColorMaterial. This extends the work from #4528 (which in turn builds on the excellent tangent handling). ## Solution - Added `#ifdef` wrapped support for vertex colors in the 2D mesh shader and `ColorMaterial` shader. - Added an example, `mesh2d_vertex_color_texture` to demonstrate it in action. ![image](https://user-images.githubusercontent.com/14896751/169530930-6ae0c6be-2f69-40e3-a600-ba91d7178bc3.png) --- ## Changelog - Added optional (ifdef wrapped) vertex color support to the 2dmesh and color material systems. commit 1bbd5c25c0e53c51ac4a3bc1dc46fd2f35e72883 Author: Daniel McNab <36049421+DJMcNab@users.noreply.github.com> Date: Mon May 30 16:59:44 2022 +0000 Enforce type safe usage of Handle::get (#4794) # Objective - Sometimes, people might load an asset as one type, then use it with an `Assets<T>` for a different type. - See e.g. #4784. - This is especially likely with the Gltf types, since users may not have a clear conceptual model of what types the assets will be. - We had an instance of this ourselves, in the `scene_viewer` example ## Solution - Make `Assets::get` require a type safe handle. --- ## Changelog ### Changed - `Assets::<T>::get` and `Assets::<T>::get_mut` now require that the passed handles are `Handle<T>`, improving the type safety of handles. ### Added - `HandleUntyped::typed_weak`, a helper function for creating a weak typed version of an existing `HandleUntyped`. ## Migration Guide `Assets::<T>::get` and `Assets::<T>::get_mut` now require that the passed handles are `Handle<T>`, improving the type safety of handles. If you were previously passing in: - a `HandleId`, use `&Handle::weak(id)` instead, to create a weak handle. You may have been able to store a type safe `Handle` instead. - a `HandleUntyped`, use `&handle_untyped.typed_weak()` to create a weak handle of the specified type. This is most likely to be useful when using [load_folder](https://docs.rs/bevy_asset/latest/bevy_asset/struct.AssetServer.html#method.load_folder) - a `Handle` of a different type, consider whether this is the correct handle type to store. If it is (i.e. the same handle id is used for multiple different Asset types) use `Handle::weak(handle.id)` to cast to a different type. commit a02c5ae819001b0d1ceb6cf7dba8b4f0d7975c15 Author: James Liu Date: Mon May 30 16:59:43 2022 +0000 Copy TaskPool resources to subapps (#4792) # Objective Fixes #4791.
`ParallelExecutor` inserts a default `ComputeTaskPool` if there isn't one stored as a resource, including when it runs on a different world. When spawning the render sub-app, the main world's `ComputeTaskPool` is not cloned and inserted into the render app's, which causes a second `ComputeTaskPool` with the default configuration to be spawned. This results in an excess number of threads being spawned. ## Solution Copy the task pools from the main world to the subapps upon creating them. ## Alternative An alternative to this would be to make the task pools global, as seen in #2250 or bevyengine/rfcs#54. commit 6a238377beba9717ecc25a7037ab58b7123d9858 Author: Hristo Iliev Date: Mon May 30 16:59:41 2022 +0000 Add documentation to the WindowDescriptor struct. (#4764) # Objective Resolves #4753 ## Solution Using rust doc I added documentation to the struct. Decided to not provide an example in the doc comment but instead refer to the example file that shows the usage. commit 8e4e5a5634a22579ff0ce450e4a9eaa173638e47 Author: James Liu Date: Mon May 30 16:59:40 2022 +0000 Use u32 over usize for ComponentSparseSet indices (#4723) # Objective Use less memory to store SparseSet components. ## Solution Change `ComponentSparseSet` to only use `Entity::id` in its key internally, and change the usize value in its SparseArray to use u32 instead, as it cannot have more than u32::MAX live entities stored at once. This should reduce the overhead of storing components in sparse set storage by 50%. commit c5e89894f45e694f882a0e2831af1d2452bac257 Author: James Liu Date: Mon May 30 16:59:38 2022 +0000 Remove task_pool parameter from par_for_each(_mut) (#4705) # Objective Fixes #3183. Requiring a `&TaskPool` parameter is sort of meaningless if the only correct one is to use the one provided by `Res` all the time. ## Solution Have `QueryState` save a clone of the `ComputeTaskPool` which is used for all `par_for_each` functions. ~~Adds a small overhead of the internal `Arc` clone as a part of the startup, but the ergonomics win should be well worth this hardly-noticeable overhead.~~ Updated the docs to note that it will panic if the task pool is not present as a resource. # Future Work If https://github.com/bevyengine/rfcs/pull/54 is approved, we can replace these resource lookups with a static function call instead to get the `ComputeTaskPool`. --- ## Changelog Removed: The `task_pool` parameter of `Query(State)::par_for_each(_mut)`. These calls will use the `World`'s `ComputeTaskPool` resource instead. ## Migration Guide The `task_pool` parameter for `Query(State)::par_for_each(_mut)` has been removed. Remove these parameters from all calls to these functions. Before: ```rust fn parallel_system( task_pool: Res<ComputeTaskPool>, query: Query<&MyComponent>, ) { query.par_for_each(&task_pool, 32, |comp| { ... }); } ``` After: ```rust fn parallel_system(query: Query<&MyComponent>) { query.par_for_each(32, |comp| { ... }); } ``` If using `Query(State)` outside of a system run by the scheduler, you may need to manually configure and initialize a `ComputeTaskPool` as a resource in the `World`. commit f59ea7e6e8a5c1d08391fa2304992d19150d2953 Author: James Liu Date: Mon May 30 16:41:34 2022 +0000 Remove redundant ComponentId in Column (#4855) # Objective The `ComponentId` in `Column` is redundant as it's stored in parallel in the surrounding `SparseSet` all the time. ## Solution Remove it. Add `SparseSet::iter(_mut)` to parallel `HashMap::iter(_mut)` to allow iterating pairs of columns and their IDs.
--- ## Changelog Added: `SparseSet::iter` and `SparseSet::iter_mut`. commit c02beabe22b398f918a49ae6b68f05094a64ffd9 Author: Hennadii Chernyshchyk Date: Mon May 30 16:41:33 2022 +0000 Add QueryState::get_single_unchecked_manual and its family (#4841) # Objective - Rebase of #3159. - Fixes https://github.com/bevyengine/bevy/issues/3156 - add #[inline] to single related functions so that they matches with other function defs ## Solution * added functions to QueryState * get_single_unchecked_manual * get_single_unchecked * get_single * get_single_mut * single * single_mut * make Query::get_single use QueryState::get_single_unchecked_manual * added #[inline] --- ## Changelog ### Added Functions `QueryState::single`, `QueryState::get_single`, `QueryState::single_mut`, `QueryState::get_single_mut`, `QueryState::get_single_unchecked`, `QueryState::get_single_unchecked_manual`. ### Changed `QuerySingleError` is now in the `state` module. ## Migration Guide Change `query::QuerySingleError` to `state::QuerySingleError` Co-authored-by: 2ne1ugly Co-authored-by: 2ne1ugly <47616772+2ne1ugly@users.noreply.github.com> commit e528b63e11f81f7002cc723e9d829063ee0e7df1 Author: Boxy Date: Mon May 30 16:41:32 2022 +0000 merge `matches_archetype` and `matches_table` (#4807) # Objective the code in these fns are always identical so stop having two functions ## Solution make them the same function --- ## Changelog change `matches_archetype` and `matches_table` to `fn matches_component_set(&self, &SparseArray) -> bool` then do extremely boring updating of all `FetchState` impls ## Migration Guide - move logic of `matches_archetype` and `matches_table` into `matches_component_set` in any manual `FetchState` impls commit 2f5591ff8c234f6322e3b8d3d585ba5c88118ffc Author: Gino Valente Date: Mon May 30 16:41:31 2022 +0000 bevy_reflect: Improve debug formatting for reflected types (#4218) # Objective Debugging reflected types can be somewhat frustrating since all `dyn Reflect` trait objects return something like `Reflect(core::option::Option)`. It would be much nicer to be able to see the actual value— or even use a custom `Debug` implementation. ## Solution Added `Reflect::debug` which allows users to customize the debug output. It sets defaults for all `ReflectRef` subtraits and falls back to `Reflect(type_name)` if no `Debug` implementation was registered. To register a custom `Debug` impl, users can add `#[reflect(Debug)]` like they can with other traits. 
### Example Using the following structs: ```rust #[derive(Reflect)] pub struct Foo { a: usize, nested: Bar, #[reflect(ignore)] _ignored: NonReflectedValue, } #[derive(Reflect)] pub struct Bar { value: Vec2, tuple_value: (i32, String), list_value: Vec<i32>, // We can't determine debug formatting for Option yet unknown_value: Option<String>, custom_debug: CustomDebug } #[derive(Reflect)] #[reflect(Debug)] struct CustomDebug; impl Debug for CustomDebug { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "This is a custom debug!") } } pub struct NonReflectedValue { _a: usize, } ``` We can do: ```rust let value = Foo { a: 1, _ignored: NonReflectedValue { _a: 10 }, nested: Bar { value: Vec2::new(1.23, 3.21), tuple_value: (123, String::from("Hello")), list_value: vec![1, 2, 3], unknown_value: Some(String::from("World")), custom_debug: CustomDebug }, }; let reflected_value: &dyn Reflect = &value; println!("{:#?}", reflected_value) ``` Which results in: ```rust Foo { a: 2, nested: Bar { value: Vec2( 1.23, 3.21, ), tuple_value: ( 123, "Hello", ), list_value: [ 1, 2, 3, ], unknown_value: Reflect(core::option::Option<alloc::string::String>), custom_debug: This is a custom debug!, }, } ``` Notice that neither `Foo` nor `Bar` implement `Debug`, yet we can still deduce it. This might be a concern if we're worried about leaking internal values. If it is, we might want to consider a way to exclude fields (possibly with a `#[reflect(hide)]` macro) or make it purely opt in (as opposed to the default implementation automatically handled by ReflectRef subtraits). Co-authored-by: Gino Valente <49806985+MrGVSV@users.noreply.github.com> commit a764d44f1720b913157d6cc112830f2f6f8ae0d3 Author: François Date: Mon May 30 16:21:03 2022 +0000 update xshell to 0.2 (#4789) # Objective - Update xshell to 0.2 in ci tool - Replace #4205 commit deeaf648975fb6b1b4f0222c6d7065ae11a89823 Author: Thierry Berger Date: Mon May 30 15:57:25 2022 +0000 shader examples wording coherence (#4810) # Objective I noticed different example descriptions were not using the same structure: ![different_wordings_examples](https://user-images.githubusercontent.com/2290685/169487055-ab76743e-3400-486f-b672-e8f60455b8e4.png) This results in sentences that a reader has to read differently each time, which might result in information being hard to find, especially for foreign language users. Original discord discussion: https://discord.com/channels/691052431525675048/976846499889705020 ## Solution - Use fewer different words, a similar structure, and be straight to the point. --- ## Changelog - Example descriptions are more accessible. commit 09a3d8abe062984479bf0e99fcc1508bb722baf6 Author: Daniel McNab <36049421+DJMcNab@users.noreply.github.com> Date: Mon May 30 15:32:48 2022 +0000 Allow minimising in 2d (#4527) # Objective - We can't minimise if there's a 2d camera because ??? there legally must be a 2d target. - Fixes https://github.com/bevyengine/bevy/issues/4526 - Fixes https://github.com/bevyengine/bevy/issues/4856 ## Solution - Make it not crash in those cases, just do nothing - Seems to work ¯\\_(ツ)_/¯ - See also the companion commit in https://github.com/bevyengine/bevy/pull/3597 - 503c24717321bb2bb2681b358020ad1bcbef510e Co-authored-by: Asteria commit 60584139de7d1d6dcbf4d1ff4d658b8ca59f8cf6 Author: Jakob Hellermann Date: Mon May 30 15:32:47 2022 +0000 untyped APIs for components and resources (#4447) # Objective Even if bevy itself does not provide any builtin scripting or modding APIs, it should have the foundations for building them yourself.
For that it should be enough to have APIs that are not tied to the actual rust types with generics, but rather accept `ComponentId`s and `bevy_ptr` ptrs. ## Solution Add the following APIs to bevy ```rust fn EntityRef::get_by_id(ComponentId) -> Option<Ptr<'_>>; fn EntityMut::get_by_id(ComponentId) -> Option<Ptr<'_>>; fn EntityMut::get_mut_by_id(ComponentId) -> Option<MutUntyped<'_>>; fn World::get_resource_by_id(ComponentId) -> Option<Ptr<'_>>; fn World::get_resource_mut_by_id(ComponentId) -> Option<MutUntyped<'_>>; // Safety: `value` must point to a valid value of the component unsafe fn World::insert_resource_by_id(ComponentId, value: OwningPtr); fn ComponentDescriptor::new_with_layout(..) -> Self; fn World::init_component_with_descriptor(ComponentDescriptor) -> ComponentId; ``` ~~This PR would definitely benefit from #3001 (lifetime'd pointers) to make sure that the lifetimes of the pointers are valid and the by-move pointer in `insert_resource_by_id` could be an `OwningPtr`, but that can be adapted later if/when #3001 is merged.~~ ### Not in this PR - inserting components on entities (this is very tied to types with bundles and the `BundleInserter`) - an untyped version of a query (needs good API design, has a large implementation complexity, can be done in a third-party crate) Co-authored-by: Jakob Hellermann commit 5256561b7a78f06f687c92f54baf69ed38c436d2 Author: Matthias Schiffer Date: Mon May 30 15:14:12 2022 +0000 OrthographicProjection: place origin at integer pixel with WindowSize scaling mode (#4085) # Objective One way to avoid texture atlas bleeding is to ensure that every vertex is placed at an integer pixel coordinate. This is a particularly appealing solution for regular structures like tile maps. Doing so is currently harder than necessary when the WindowSize scaling mode and Center origin are used: For odd window width or height, the origin of the coordinate system is placed in the middle of a pixel at some .5 offset. ## Solution Avoid this issue by rounding the half width and height values. commit d353fbc6ea9e0c4fa69fc3b8e646e1ce79fbbc16 Author: François Date: Sat May 28 02:00:55 2022 +0000 update image to 0.24 (#4121) # Objective - update image to 0.24 ## Solution - `Bgra*` variants support has been removed from image, remove them from Bevy code - replace #4003 changelog: https://github.com/image-rs/image/blob/master/CHANGES.md commit 99e689cfd2a8fc328ba0998e2088179dfb5daed9 Author: Thierry Berger Date: Fri May 27 20:52:12 2022 +0000 remove unneeded msaa explicit addition from examples (#4830) # Objective - Coming from https://github.com/bevyengine/bevy/pull/4797/files/7a596f1910c97c41f195b12d33025999e81dc0e5#r876310734 - Simplify the examples regarding addition of `Msaa` Resource with default value. ## Solution - Remove addition of `Msaa` Resource with default value from examples. commit c89af06c6558b80e1ff61e945124625944356cd2 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri May 27 11:54:57 2022 +0000 Update tracing-tracy requirement from 0.8.0 to 0.9.0 (#4786) Updates the requirements on [tracing-tracy](https://github.com/nagisa/rust_tracy_client) to permit the latest version.
commit 589c52afe5d434dd339bd2d9d61dd8514dd7a458 Author: Christopher Durham Date: Thu May 26 02:04:22 2022 +0000 Make bevy_app's optional bevy_reflect dependency actually optional (#4846) # Objective - Make bevy_app's optional bevy_reflect dependency actually optional - Because bevy_ecs has a default dependency on bevy_reflect, bevy_app includes bevy_reflect transitively even with default-features=false, despite the optional dependency indicating that it was intended to be able to leave out bevy_reflect. ## Solution - Make bevy_app not enable bevy_ecs's default features, and then use [the `dep:` syntax](https://doc.rust-lang.org/cargo/reference/features.html#optional-dependencies) introduced in 1.60 to make the default bevy_reflect feature enable bevy_ecs's bevy_reflect feature/dependency. --- ## Changelog - bevy_app no longer enables bevy_ecs's `bevy_reflect` feature when included without its own `bevy_reflect` feature (which is on by default). commit 644bd5dbc6c281a4fae65c603f09b6666de8c566 Author: Christopher Durham Date: Thu May 26 00:27:18 2022 +0000 Split time functionality into bevy_time (#4187) # Objective Reduce the catch-all grab-bag of functionality in bevy_core by minimally splitting off time functionality into bevy_time. Functionality like that provided by #3002 would increase the complexity of bevy_time, so this is a good candidate for pulling into its own unit. A step in addressing #2931 and splitting bevy_core into more specific locations. ## Solution Pull the time module of bevy_core into a new crate, bevy_time. # Migration guide - Time related types (e.g. `Time`, `Timer`, `Stopwatch`, `FixedTimestep`, etc.) should be imported from `bevy::time::*` rather than `bevy::core::*`. - If you were adding `CorePlugin` manually, you'll also want to add `TimePlugin` from `bevy::time`. - The `bevy::core::CorePlugin::Time` system label is replaced with `bevy::time::TimeSystem`. Co-authored-by: Carter Anderson commit d683d9b9f57dc1d11c429b2b9dc9b2b1ed2ea4cb Author: Alice Cecile Date: Thu May 26 00:09:23 2022 +0000 Improve docs and naming for RawWindowHandle functionality (#4335) # Objective - As noticed in #4333 by @x-52, the exact purpose and logic of `HasRawWIndowHandleWrapper` is unclear - Unfortunately, there are rather good reasons why this design is needed (and why we can't just `impl HasRawWindowHandle for RawWindowHandleWrapper` ## Solution - Rename `HasRawWindowHandleWrapper` to `ThreadLockedRawWindowHandleWrapper`, reflecting the primary distinction - Document how this design is intended to be used - Leave comments explaining why this design must exist ## Migration Guide - renamed `HasRawWindowHandleWrapper` to `ThreadLockedRawWindowHandleWrapper` commit f271d734e65908d8514da914d2bed7a7bb90696b Author: Niklas Eicker Date: Wed May 25 17:46:58 2022 +0000 Rename Color::as_hlsa_f32 to Color::as_hsla_f32 (#4827) # Objective Make the function consistent with returned values and `as_hsla` method Fixes #4826 ## Solution - Rename the method ## Migration Guide - Rename the method commit 5dd30b627959bf86fea996e76e98bb0bd7250889 Author: Carter Anderson Date: Wed May 25 05:08:34 2022 +0000 Remove markdown dead link check (#4839) # Objective This fails constantly and causes more pain than it is worth. ## Solution Remove dead link checks. Alternative to #4837, which is more granular but ironically still fails to build. I'm in favor of the nuclear option. 
Fixes #4575 commit fed93a0edce9d66586dc70c1207a2092694b9a7d Author: Carter Anderson Date: Fri May 20 23:13:48 2022 +0000 Optionally resize Window canvas element to fit parent element (#4726) Currently Bevy's web canvases are "fixed size". They are manually set to specific dimensions. This might be fine for some games and website layouts, but for sites with flexible layouts, or games that want to "fill" the browser window, Bevy doesn't provide the tools needed to make this easy out of the box. There are third party plugins like [bevy-web-resizer](https://github.com/frewsxcv/bevy-web-resizer/) that listen for window resizes, take the new dimensions, and resize the winit window accordingly. However this only covers a subset of cases and this is common enough functionality that it should be baked into Bevy. A significant motivating use case here is the [Bevy WASM Examples page](https://bevyengine.org/examples/). This scales the canvas to fit smaller windows (such as mobile). But this approach both breaks winit's mouse events and removes pixel-perfect rendering (which means we might be rendering too many or too few pixels). https://github.com/bevyengine/bevy-website/issues/371 In an ideal world, winit would support this behavior out of the box. But unfortunately that seems blocked for now: https://github.com/rust-windowing/winit/pull/2074. And it builds on the ResizeObserver api, which isn't supported in all browsers yet (and is only supported in very new versions of the popular browsers). While we wait for a complete winit solution, I've added a `fit_canvas_to_parent` option to WindowDescriptor / Window, which when enabled will listen for window resizes and resize the Bevy canvas/window to fit its parent element. This enables users to scale bevy canvases using arbitrary CSS, by "inheriting" their parents' size. Note that the wrapper element _is_ required because winit overrides the canvas sizing with absolute values on each resize. There is one limitation worth calling out here: while the majority of canvas resizes will be triggered by window resizes, modifying element layout at runtime (css animations, javascript-driven element changes, dev-tool-injected changes, etc) will not be detected here. I'm not aware of a good / efficient event-driven way to do this outside of the ResizeObserver api. In practice, window-resize-driven canvas resizing should cover the majority of use cases. Users that want to actively poll for element resizes can just do that (or we can build another feature and let people choose based on their specific needs). I also took the chance to make a couple of minor tweaks: * Made the `canvas` window setting available on all platforms. Users shouldn't need to deal with cargo feature selection to support web scenarios. We can just ignore the value on non-web platforms. I added documentation that explains this. * Removed the redundant "initial create windows" handler. With the addition of the code in this pr, the code duplication was untenable. This enables a number of patterns: ## Easy "fullscreen window" mode for the default canvas The "parent element" defaults to the `` element. ```rust app .insert_resource(WindowDescriptor { fit_canvas_to_parent: true, ..default() }) ``` And CSS: ```css html, body { margin: 0; height: 100%; } ``` ## Fit custom canvas to "wrapper" parent element ```rust app .insert_resource(WindowDescriptor { fit_canvas_to_parent: true, canvas: Some("#bevy".to_string()), ..default() }) ``` And the HTML: ```html
```

commit b6eededea454cc3c44492f0ff76a0604f2e27921
Author: Teodor Tanasoaia <28601907+teoxoy@users.noreply.github.com>
Date: Fri May 20 22:05:32 2022 +0000

Use uniform buffer usage for `SkinnedMeshUniform` instead of all usages (#4816)

# Objective

Fixes #4811 (caused by #4339 [[exact change](https://github.com/bevyengine/bevy/pull/4339/files#diff-4bf3ed03d4129aad9f5678ba19f9b14ee8e3e61d6f6365e82197b01c74468b10R712-R721)], where the buffer type was changed from `UniformVec` to `BufferVec`).

## Solution

Use uniform buffer usage for `SkinnedMeshUniform` instead of the all-usages value that came from the `Default` derive.

commit 3a93b677a15b0ad4ec979366b0b93377aab1cc3e
Author: Gino Valente
Date: Fri May 20 13:31:49 2022 +0000

bevy_reflect: Added `get_boxed` method to `reflect_trait` (#4120)

# Objective

Allow a boxed `Reflect` trait object (`Box<dyn Reflect>`) to be converted into a boxed trait object of the reflected trait using the `#[reflect_trait]` macro. The other methods, `get` and `get_mut`, only provide a reference to the reflected object.

## Solution

Add a `get_boxed` method to the `Reflect***` struct generated by the `#[reflect_trait]` macro. This method takes in a `Box<dyn Reflect>` and returns a boxed trait object of the reflected trait.

Co-authored-by: MrGVSV <49806985+MrGVSV@users.noreply.github.com>

commit 7cb4d3cb43814b917d3d1e5008a638b0631f08c7
Author: Teodor Tanasoaia <28601907+teoxoy@users.noreply.github.com>
Date: Wed May 18 21:09:21 2022 +0000

Migrate to encase from crevice (#4339)

# Objective

- Unify buffer APIs
- Also see #4272

## Solution

- Replace vendored `crevice` with `encase`

---

## Changelog

- Changed `StorageBuffer`
- Added `DynamicStorageBuffer`
- Replaced `UniformVec` with `UniformBuffer`
- Replaced `DynamicUniformVec` with `DynamicUniformBuffer`

## Migration Guide

### `StorageBuffer`

- removed `set_body()`, `values()`, `values_mut()`, `clear()`, `push()`, `append()`
- added `set()`, `get()`, `get_mut()`

### `UniformVec` -> `UniformBuffer`

- renamed `uniform_buffer()` to `buffer()`
- removed `len()`, `is_empty()`, `capacity()`, `push()`, `reserve()`, `clear()`, `values()`
- added `set()`, `get()`

### `DynamicUniformVec` -> `DynamicUniformBuffer`

- renamed `uniform_buffer()` to `buffer()`
- removed `capacity()`, `reserve()`

Co-authored-by: Carter Anderson

commit 1320818f96cc2ea673e726acfcb7441953fad81b
Author: Boxy
Date: Wed May 18 20:57:24 2022 +0000

Fix unsoundness with `Or`/`AnyOf`/`Option` component accesses (#4659)

# Objective

Fixes #4657

Example code that wasn't panicking before this PR (and so was unsound):

```rust
#[test]
#[should_panic = "error[B0001]"]
fn option_has_no_filter_with() {
    fn sys(_1: Query<(Option<&A>, &mut B)>, _2: Query<&mut B, Without>) {}
    let mut world = World::default();
    run_system(&mut world, sys);
}

#[test]
#[should_panic = "error[B0001]"]
fn any_of_has_no_filter_with() {
    fn sys(_1: Query<(AnyOf<(&A, ())>, &mut B)>, _2: Query<&mut B, Without>) {}
    let mut world = World::default();
    run_system(&mut world, sys);
}

#[test]
#[should_panic = "error[B0001]"]
fn or_has_no_filter_with() {
    fn sys(_1: Query<&mut B, Or<(With, With)>>, _2: Query<&mut B, Without>) {}
    let mut world = World::default();
    run_system(&mut world, sys);
}
```

## Solution

- Only add the intersection of the `with`/`without` accesses of all the elements in `Or`/`AnyOf` to the world query's `FilteredAccess`, instead of the union.
- `Option`'s fix can be thought of the same way, since it's basically `AnyOf`; its impl is just simpler, as `()` has no `with`/`without` accesses.

---

## Changelog

- `Or`/`AnyOf`/`Option` will now report more query conflicts in order to fix unsoundness

## Migration Guide

- If you are now getting query conflicts from `Or`/`AnyOf`/`Option`, those queries could previously alias mutable access unsoundly; the conflict is now correctly caught.
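To make the intersection rule above concrete, here is a conceptual sketch in plain `std` Rust (not the actual `bevy_ecs` implementation) of how the `with` filters guaranteed by the branches of an `Or`/`AnyOf` can be combined:

```rust
use std::collections::HashSet;

/// Conceptual stand-in: only the components guaranteed by *every* branch of an
/// `Or`/`AnyOf` survive (the intersection), so the combined query no longer
/// pretends to be disjoint from other queries.
fn combined_with_filters(branches: &[HashSet<&'static str>]) -> HashSet<&'static str> {
    let mut iter = branches.iter();
    let first = iter.next().cloned().unwrap_or_default();
    iter.fold(first, |acc, branch| acc.intersection(branch).cloned().collect())
}

fn main() {
    // e.g. a hypothetical Or<(With<A>, With<C>)>: one branch guarantees A, the other C.
    let with_a: HashSet<_> = ["A"].into_iter().collect();
    let with_c: HashSet<_> = ["C"].into_iter().collect();

    // The old union ({A, C}) wrongly promised disjointness; the intersection is
    // empty, so a conflicting `&mut` access elsewhere is now reported.
    assert!(combined_with_filters(&[with_a, with_c]).is_empty());
}
```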
commit 2c93b5cf73291a1540c1035a3154aebc8d8aaeb3
Author: James Liu
Date: Wed May 18 18:34:52 2022 +0000

Reduce code duplication by using QueryIterationCursor in QueryIter (#4733)

# Objective

We have duplicated code between `QueryIter` and `QueryIterationCursor`. Reuse that code.

## Solution

- Reuse `QueryIterationCursor` inside `QueryIter`.
- Slim down `QueryIter` by removing the `&'w World`. It was only being used by the `size_hint` and `ExactSizeIterator` impls, which can use the `QueryState` and `&Archetypes` already in the type.
- Benchmark to make sure there is no significant regression.

The relevant benchmark results seem to show no tangible difference between the two; everything is either identical or within a workable margin of error.

```
group                                    embed-cursor                   main
-----                                    ------------                   ----
fragmented_iter/base                     1.00  387.4±19.70ns ? ?/sec    1.07  413.1±27.95ns ? ?/sec
many_maps_iter                           1.00   27.3±0.22ms ? ?/sec     1.00   27.4±0.10ms ? ?/sec
simple_iter/base                         1.00   13.8±0.07µs ? ?/sec     1.00   13.7±0.17µs ? ?/sec
simple_iter/sparse                       1.00   61.9±0.37µs ? ?/sec     1.00   62.2±0.64µs ? ?/sec
simple_iter/system                       1.00   13.7±0.34µs ? ?/sec     1.00   13.7±0.10µs ? ?/sec
sparse_fragmented_iter/base              1.00   11.0±0.54ns ? ?/sec     1.03   11.3±0.48ns ? ?/sec
world_query_iter/50000_entities_sparse   1.08  105.0±2.68µs ? ?/sec     1.00   97.5±2.18µs ? ?/sec
world_query_iter/50000_entities_table    1.00   27.3±0.13µs ? ?/sec     1.00   27.3±0.37µs ? ?/sec
```

commit 15acd6f45deb3cd21d1f3a9e7d39d135068f09c5
Author: MrGVSV
Date: Wed May 18 12:26:11 2022 +0000

bevy_reflect: Small refactor and default `Reflect` methods (#4739)

# Objective

Quick followup to #4712. While updating some [other PRs](https://github.com/bevyengine/bevy/pull/4218), I realized the `ReflectTraits` struct could be improved. The issue with the current implementation is that `ReflectTraits::get_xxx_impl(...)` returns just the _logic_ of the corresponding `Reflect` trait method, rather than the entire function. This makes it slightly more annoying to manage, since the variable names need to be consistent across files. For example, `get_partial_eq_impl` uses a `value` variable, but the name "value" isn't defined in the `get_partial_eq_impl` method; it's defined in three other methods in a completely separate file. It's not likely to cause any bugs if we keep it as it is, since differing variable names will probably just result in a compile error (except in very particular cases), but it would be useful to someone who wanted to edit/add/remove a method.

## Solution

Made `get_hash_impl`, `get_partial_eq_impl`, and `get_serialize_impl` return the entire method implementation for `reflect_hash`, `reflect_partial_eq`, and `serializable`, respectively. As a result of this, those three `Reflect` methods were also given default implementations. This was fairly simple to do, since all three could just be made to return `None`.

---

## Changelog

* Small cleanup/refactor to `ReflectTraits` in `bevy_reflect_derive`
* Gave `Reflect::reflect_hash`, `Reflect::reflect_partial_eq`, and `Reflect::serializable` default implementations
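The "default implementations that just return `None`" pattern described above, shown on an illustrative trait (not the real `Reflect` trait) so the shape of the change is clear:

```rust
use std::any::Any;

// Illustrative trait: methods get default bodies returning `None`, so
// generated impls only need to override the ones a type opts into.
trait ReflectLike {
    fn reflect_hash(&self) -> Option<u64> {
        None
    }
    fn reflect_partial_eq(&self, _value: &dyn Any) -> Option<bool> {
        None
    }
    fn serializable(&self) -> Option<String> {
        None
    }
}

struct Foo;
impl ReflectLike for Foo {} // compiles: every method falls back to the default

fn main() {
    assert_eq!(Foo.reflect_hash(), None);
    assert_eq!(Foo.serializable(), None);
}
```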
commit de2b1a4e94fc5e4f16b0b98f5a4280b0646d8fec
Author: MrGVSV
Date: Tue May 17 23:45:09 2022 +0000

bevy_reflect: Reflected char (#4790)

# Objective

`char` isn't reflected.

## Solution

Reflected `char`.

---

## Changelog

* Reflected `char`

## Migration Guide

> List too long to display

commit a0a14aa615336523fb53eabde8a4f395ded96341
Author: Alex Saveau
Date: Tue May 17 22:37:51 2022 +0000

Support returning data out of with_children (#4708)

# Objective

Support returning data out of `with_children`, to enable the use case of changing the parent commands with data created inside the child builder.

## Solution

Change the `with_children` closure to return `T`. Closes https://github.com/bevyengine/bevy/pull/2817.

---

## Changelog

`BuildChildren::add_children` was added with the ability to return data to use outside the closure (for example, for spawning a new child builder on a returned entity).

commit 7da21b12f7019d93f2e86a2ae010f57465a9af97
Author: Daniel McNab <36049421+DJMcNab@users.noreply.github.com>
Date: Tue May 17 22:24:50 2022 +0000

Add some more documentation to `SystemParam` (#4787)

# Objective

- Fixes https://github.com/bevyengine/bevy/issues/4783

## Solution

- Add more documentation about the derive, and the obscure failure case for it.
- Link to [`StaticSystemParam`](https://docs.rs/bevy/latest/bevy/ecs/system/struct.StaticSystemParam.html) in these docs.
- Also explain the attributes whilst here.

commit ae0cb549ff1c8ceb3524825351aa01b235bbdac0
Author: François
Date: Tue May 17 19:04:08 2022 +0000

helper tool to build examples in wasm (#4776)

# Objective

- Add a helper to build examples in wasm (from #4700)

## Solution

- `cargo run -p build-wasm-example -- lighting`

commit e36bfa21ab4ea3e771c6bafc4149ff22c936d777
Author: Matt Wilkinson
Date: Tue May 17 16:00:17 2022 +0000

Change path to zld on macOS fast build example (#4778)

# Objective

Fixes #4751, a zld link error.

## Solution

- Change the `zld` file path in the example to the one Homebrew installs to by default, `/usr/local/bin/zld`.
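As a usage illustration of the `add_children` addition described in the `Support returning data out of with_children` entry above (a hedged sketch; the closure signature and return behavior are assumed from the changelog wording rather than taken from the final API docs):

```rust
use bevy::prelude::*;

fn setup(mut commands: Commands) {
    // The value returned from the closure is handed back out of `add_children`,
    // here the freshly spawned child's `Entity`.
    let child = commands
        .spawn_bundle(TransformBundle::default())
        .add_children(|parent| parent.spawn_bundle(TransformBundle::default()).id());

    // The returned entity can then be used with the regular commands API,
    // for example as the parent of further children.
    commands.entity(child).with_children(|builder| {
        builder.spawn_bundle(TransformBundle::default());
    });
}

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_startup_system(setup)
        .run();
}
```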
--- .cargo/config_fast_builds | 2 +- .github/ISSUE_TEMPLATE/bug_report.md | 43 +- .github/bors.toml | 4 +- .github/linters/markdown-link-check.json | 27 - .github/start-wasm-example/.gitignore | 4 + .github/start-wasm-example/package-lock.json | 76 + .github/start-wasm-example/package.json | 16 + .../start-wasm-example/playwright.config.ts | 107 + .../tests/wasm_example.spec.ts | 51 + .github/workflows/ci.yml | 102 +- .github/workflows/ios.yml | 31 - .github/workflows/post-release.yml | 59 + .github/workflows/release.yml | 56 + .github/workflows/validation-jobs.yml | 157 ++ CONTRIBUTING.md | 8 +- Cargo.toml | 36 +- LICENSE | 6 - docs/LICENSE-APACHE => LICENSE-APACHE | 0 docs/LICENSE-MIT => LICENSE-MIT | 0 README.md | 4 +- assets/shaders/animate_shader.wgsl | 11 +- .../custom_material_chromatic_aberration.wgsl | 25 + .../custom_material_screenspace_texture.wgsl | 2 +- assets/shaders/custom_vertex_attribute.wgsl | 22 +- assets/shaders/instancing.wgsl | 11 +- assets/shaders/shader_defs.wgsl | 13 +- benches/Cargo.toml | 6 + .../bevy_ecs/ecs_bench_suite/heavy_compute.rs | 9 +- benches/benches/bevy_ecs/schedule.rs | 69 + crates/bevy_animation/Cargo.toml | 1 + crates/bevy_animation/src/lib.rs | 5 +- crates/bevy_app/Cargo.toml | 4 +- crates/bevy_app/src/ci_testing.rs | 25 +- crates/bevy_app/src/plugin_group.rs | 14 +- crates/bevy_asset/Cargo.toml | 2 +- crates/bevy_asset/src/asset_server.rs | 29 +- crates/bevy_asset/src/assets.rs | 8 +- crates/bevy_asset/src/debug_asset_server.rs | 15 +- .../asset_count_diagnostics_plugin.rs | 2 +- crates/bevy_asset/src/handle.rs | 12 +- crates/bevy_asset/src/lib.rs | 7 +- crates/bevy_asset/src/loader.rs | 8 - crates/bevy_asset/src/path.rs | 2 +- crates/bevy_audio/src/audio_output.rs | 8 +- crates/bevy_core/src/lib.rs | 52 +- crates/bevy_core/src/task_pool_options.rs | 28 +- crates/bevy_core/src/time/mod.rs | 10 - crates/bevy_core_pipeline/Cargo.toml | 6 + crates/bevy_core_pipeline/src/clear_color.rs | 32 + crates/bevy_core_pipeline/src/clear_pass.rs | 128 -- .../src/clear_pass_driver.rs | 20 - .../src/core_2d/camera_2d.rs | 82 + .../src/core_2d/main_pass_2d_node.rs | 119 ++ crates/bevy_core_pipeline/src/core_2d/mod.rs | 130 ++ .../src/core_3d/camera_3d.rs | 85 + .../main_pass_3d_node.rs} | 59 +- crates/bevy_core_pipeline/src/core_3d/mod.rs | 247 +++ crates/bevy_core_pipeline/src/lib.rs | 433 +--- crates/bevy_core_pipeline/src/main_pass_2d.rs | 74 - .../src/main_pass_driver.rs | 33 - crates/bevy_crevice/Cargo.toml | 36 - crates/bevy_crevice/LICENSE-APACHE | 201 -- crates/bevy_crevice/LICENSE-MIT | 19 - crates/bevy_crevice/README.md | 181 -- crates/bevy_crevice/README.tpl | 25 - .../bevy-crevice-derive/Cargo.toml | 27 - .../bevy-crevice-derive/src/glsl.rs | 48 - .../bevy-crevice-derive/src/layout.rs | 287 --- .../bevy-crevice-derive/src/lib.rs | 59 - crates/bevy_crevice/crevice-tests/Cargo.toml | 20 - crates/bevy_crevice/crevice-tests/src/gpu.rs | 268 --- crates/bevy_crevice/crevice-tests/src/lib.rs | 366 ---- crates/bevy_crevice/crevice-tests/src/util.rs | 143 -- crates/bevy_crevice/src/glsl.rs | 93 - crates/bevy_crevice/src/imp.rs | 10 - crates/bevy_crevice/src/imp/imp_cgmath.rs | 30 - crates/bevy_crevice/src/imp/imp_glam.rs | 24 - crates/bevy_crevice/src/imp/imp_mint.rs | 30 - crates/bevy_crevice/src/imp/imp_nalgebra.rs | 24 - crates/bevy_crevice/src/internal.rs | 40 - crates/bevy_crevice/src/lib.rs | 174 -- crates/bevy_crevice/src/std140.rs | 18 - .../src/std140/dynamic_uniform.rs | 68 - crates/bevy_crevice/src/std140/primitives.rs | 175 -- 
crates/bevy_crevice/src/std140/sizer.rs | 81 - crates/bevy_crevice/src/std140/traits.rs | 284 --- crates/bevy_crevice/src/std140/writer.rs | 162 -- crates/bevy_crevice/src/std430.rs | 16 - crates/bevy_crevice/src/std430/primitives.rs | 173 -- crates/bevy_crevice/src/std430/sizer.rs | 81 - crates/bevy_crevice/src/std430/traits.rs | 291 --- crates/bevy_crevice/src/std430/writer.rs | 150 -- crates/bevy_crevice/src/util.rs | 97 - .../test__generate_struct_array_glsl.snap | 8 - .../snapshots/test__generate_struct_glsl.snap | 9 - crates/bevy_crevice/tests/test.rs | 61 - crates/bevy_diagnostic/Cargo.toml | 2 +- crates/bevy_diagnostic/src/diagnostic.rs | 56 +- .../src/entity_count_diagnostics_plugin.rs | 14 +- .../src/frame_time_diagnostics_plugin.rs | 34 +- .../src/log_diagnostics_plugin.rs | 69 +- crates/bevy_ecs/Cargo.toml | 1 + crates/bevy_ecs/macros/src/fetch.rs | 104 +- crates/bevy_ecs/src/archetype.rs | 51 +- crates/bevy_ecs/src/change_detection.rs | 56 +- crates/bevy_ecs/src/component.rs | 179 +- crates/bevy_ecs/src/entity/mod.rs | 104 +- crates/bevy_ecs/src/lib.rs | 12 +- crates/bevy_ecs/src/query/access.rs | 43 +- crates/bevy_ecs/src/query/fetch.rs | 654 +++--- crates/bevy_ecs/src/query/filter.rs | 234 ++- crates/bevy_ecs/src/query/iter.rs | 238 ++- crates/bevy_ecs/src/query/mod.rs | 43 +- crates/bevy_ecs/src/query/state.rs | 355 +++- crates/bevy_ecs/src/reflect.rs | 1 + .../src/schedule/executor_parallel.rs | 5 +- crates/bevy_ecs/src/schedule/stage.rs | 8 +- crates/bevy_ecs/src/schedule/state.rs | 29 +- crates/bevy_ecs/src/storage/blob_vec.rs | 9 +- crates/bevy_ecs/src/storage/mod.rs | 1 - crates/bevy_ecs/src/storage/sparse_set.rs | 170 +- crates/bevy_ecs/src/storage/table.rs | 108 +- .../src/system/commands/command_queue.rs | 61 +- crates/bevy_ecs/src/system/commands/mod.rs | 2 + .../src/system/commands/parallel_scope.rs | 98 + crates/bevy_ecs/src/system/function_system.rs | 119 +- crates/bevy_ecs/src/system/mod.rs | 66 + crates/bevy_ecs/src/system/query.rs | 214 +- crates/bevy_ecs/src/system/system_param.rs | 59 +- crates/bevy_ecs/src/world/entity_ref.rs | 153 +- crates/bevy_ecs/src/world/mod.rs | 275 ++- crates/bevy_ecs/src/world/spawn_batch.rs | 8 + crates/bevy_ecs/src/world/world_cell.rs | 3 +- crates/bevy_ecs_compile_fail_tests/Cargo.toml | 4 +- .../ui/system_param_derive_readonly.stderr | 5 +- ...query_many_for_each_mut_lifetime_safety.rs | 14 + ...y_many_for_each_mut_lifetime_safety.stderr | 10 + .../tests/ui/world_query_derive.stderr | 16 +- crates/bevy_encase_derive/Cargo.toml | 16 + crates/bevy_encase_derive/src/lib.rs | 40 + crates/bevy_gilrs/Cargo.toml | 2 +- crates/bevy_gltf/Cargo.toml | 2 + crates/bevy_gltf/src/loader.rs | 128 +- crates/bevy_hierarchy/src/child_builder.rs | 87 +- .../bevy_hierarchy/src/components/children.rs | 11 + .../bevy_hierarchy/src/components/parent.rs | 12 +- crates/bevy_input/src/input.rs | 44 + crates/bevy_input/src/mouse.rs | 2 +- crates/bevy_input/src/touch.rs | 20 + crates/bevy_internal/Cargo.toml | 15 +- crates/bevy_internal/src/default_plugins.rs | 4 + crates/bevy_internal/src/lib.rs | 7 +- crates/bevy_internal/src/prelude.rs | 3 +- crates/bevy_log/Cargo.toml | 2 +- crates/bevy_mikktspace/Cargo.toml | 17 + crates/bevy_mikktspace/LICENSE-APACHE | 176 ++ crates/bevy_mikktspace/LICENSE-MIT | 26 + crates/bevy_mikktspace/README.md | 35 + crates/bevy_mikktspace/examples/cube.obj | 114 ++ crates/bevy_mikktspace/examples/generate.rs | 259 +++ crates/bevy_mikktspace/src/generated.rs | 1809 +++++++++++++++++ crates/bevy_mikktspace/src/lib.rs | 85 + 
.../bevy_mikktspace/tests/regression_test.rs | 889 ++++++++ crates/bevy_pbr/src/lib.rs | 61 +- crates/bevy_pbr/src/light.rs | 38 +- crates/bevy_pbr/src/material.rs | 29 +- crates/bevy_pbr/src/pbr_material.rs | 31 +- .../src/render/clustered_forward.wgsl | 100 + crates/bevy_pbr/src/render/depth.wgsl | 14 +- crates/bevy_pbr/src/render/light.rs | 241 ++- crates/bevy_pbr/src/render/mesh.rs | 135 +- crates/bevy_pbr/src/render/mesh.wgsl | 45 +- crates/bevy_pbr/src/render/mesh_bindings.wgsl | 11 + .../bevy_pbr/src/render/mesh_functions.wgsl | 36 + .../{mesh_struct.wgsl => mesh_types.wgsl} | 2 +- .../src/render/mesh_view_bindings.wgsl | 42 + ...w_bind_group.wgsl => mesh_view_types.wgsl} | 41 +- crates/bevy_pbr/src/render/pbr.wgsl | 753 +------ crates/bevy_pbr/src/render/pbr_bindings.wgsl | 26 + crates/bevy_pbr/src/render/pbr_functions.wgsl | 196 ++ crates/bevy_pbr/src/render/pbr_lighting.wgsl | 255 +++ crates/bevy_pbr/src/render/pbr_types.wgsl | 24 + crates/bevy_pbr/src/render/shadows.wgsl | 77 + crates/bevy_pbr/src/render/skinning.wgsl | 34 +- crates/bevy_pbr/src/render/utils.wgsl | 23 + crates/bevy_pbr/src/render/wireframe.wgsl | 29 +- crates/bevy_pbr/src/wireframe.rs | 16 +- crates/bevy_ptr/src/lib.rs | 2 +- crates/bevy_reflect/Cargo.toml | 2 +- .../bevy_reflect_derive/Cargo.toml | 2 +- .../src/container_attributes.rs | 73 +- .../src/field_attributes.rs | 46 +- .../bevy_reflect_derive/src/from_reflect.rs | 63 +- .../bevy_reflect_derive/src/impls.rs | 182 +- .../bevy_reflect_derive/src/lib.rs | 2 + .../src/trait_reflection.rs | 35 + .../bevy_reflect_derive/src/type_uuid.rs | 2 +- crates/bevy_reflect/src/array.rs | 156 +- crates/bevy_reflect/src/fields.rs | 84 + crates/bevy_reflect/src/impls/glam.rs | 40 +- crates/bevy_reflect/src/impls/smallvec.rs | 21 +- crates/bevy_reflect/src/impls/std.rs | 124 +- crates/bevy_reflect/src/lib.rs | 397 +++- crates/bevy_reflect/src/list.rs | 117 +- crates/bevy_reflect/src/map.rs | 137 +- crates/bevy_reflect/src/reflect.rs | 56 +- crates/bevy_reflect/src/serde/ser.rs | 26 +- crates/bevy_reflect/src/struct_trait.rs | 149 +- crates/bevy_reflect/src/tuple.rs | 136 +- crates/bevy_reflect/src/tuple_struct.rs | 120 +- crates/bevy_reflect/src/type_info.rs | 223 ++ crates/bevy_reflect/src/type_registry.rs | 117 +- crates/bevy_reflect/src/utility.rs | 142 ++ crates/bevy_render/Cargo.toml | 8 +- crates/bevy_render/macros/Cargo.toml | 19 + .../macros/src/extract_resource.rs | 26 + crates/bevy_render/macros/src/lib.rs | 16 + crates/bevy_render/src/camera/bundle.rs | 164 -- crates/bevy_render/src/camera/camera.rs | 464 +++-- .../src/camera/camera_driver_node.rs | 109 + crates/bevy_render/src/camera/mod.rs | 36 +- crates/bevy_render/src/camera/projection.rs | 179 +- crates/bevy_render/src/color/mod.rs | 45 +- ...nder_component.rs => extract_component.rs} | 20 +- crates/bevy_render/src/extract_resource.rs | 46 + crates/bevy_render/src/lib.rs | 17 +- .../bevy_render/src/mesh/mesh/conversions.rs | 40 +- crates/bevy_render/src/mesh/mesh/mod.rs | 172 +- crates/bevy_render/src/render_asset.rs | 5 +- crates/bevy_render/src/render_graph/edge.rs | 6 +- crates/bevy_render/src/render_graph/graph.rs | 19 +- crates/bevy_render/src/render_graph/node.rs | 36 +- .../src/render_phase/draw_state.rs | 46 +- .../src/render_resource/buffer_vec.rs | 13 +- crates/bevy_render/src/render_resource/mod.rs | 11 +- .../src/render_resource/pipeline_cache.rs | 32 +- .../bevy_render/src/render_resource/shader.rs | 38 +- .../src/render_resource/storage_buffer.rs | 269 +-- 
.../src/render_resource/uniform_buffer.rs | 150 ++ .../src/render_resource/uniform_vec.rs | 166 -- .../bevy_render/src/renderer/graph_runner.rs | 19 +- crates/bevy_render/src/texture/basis.rs | 6 +- crates/bevy_render/src/texture/dds.rs | 165 +- .../src/texture/hdr_texture_loader.rs | 2 +- crates/bevy_render/src/texture/image.rs | 192 +- .../src/texture/image_texture_conversion.rs | 72 +- crates/bevy_render/src/texture/ktx2.rs | 370 +--- crates/bevy_render/src/texture/mod.rs | 39 + crates/bevy_render/src/view/mod.rs | 32 +- crates/bevy_render/src/view/visibility/mod.rs | 100 +- crates/bevy_scene/Cargo.toml | 4 +- crates/bevy_scene/src/bundle.rs | 74 + crates/bevy_scene/src/command.rs | 56 - crates/bevy_scene/src/dynamic_scene.rs | 11 +- crates/bevy_scene/src/lib.rs | 12 +- crates/bevy_scene/src/scene.rs | 6 + crates/bevy_scene/src/scene_spawner.rs | 70 +- crates/bevy_sprite/src/lib.rs | 2 +- .../bevy_sprite/src/mesh2d/color_material.rs | 18 +- .../src/mesh2d/color_material.wgsl | 16 +- crates/bevy_sprite/src/mesh2d/material.rs | 4 +- crates/bevy_sprite/src/mesh2d/mesh.rs | 66 +- crates/bevy_sprite/src/mesh2d/mesh2d.wgsl | 45 +- .../src/mesh2d/mesh2d_bindings.wgsl | 6 + .../src/mesh2d/mesh2d_functions.wgsl | 36 + .../{mesh2d_struct.wgsl => mesh2d_types.wgsl} | 2 +- .../src/mesh2d/mesh2d_view_bindings.wgsl | 6 + ...bind_group.wgsl => mesh2d_view_types.wgsl} | 2 +- crates/bevy_sprite/src/render/mod.rs | 23 +- crates/bevy_sprite/src/render/sprite.wgsl | 6 +- crates/bevy_sprite/src/texture_atlas.rs | 14 +- crates/bevy_tasks/Cargo.toml | 3 +- crates/bevy_tasks/src/task_pool.rs | 47 +- crates/bevy_tasks/src/usages.rs | 71 +- crates/bevy_text/src/font_atlas_set.rs | 10 +- crates/bevy_text/src/pipeline.rs | 2 +- crates/bevy_text/src/text2d.rs | 6 +- crates/bevy_time/Cargo.toml | 17 + .../time => bevy_time/src}/fixed_timestep.rs | 0 crates/bevy_time/src/lib.rs | 46 + .../src/time => bevy_time/src}/stopwatch.rs | 20 +- .../src/time => bevy_time/src}/time.rs | 11 +- .../src/time => bevy_time/src}/timer.rs | 34 +- crates/bevy_transform/src/systems.rs | 29 +- crates/bevy_ui/Cargo.toml | 1 + crates/bevy_ui/src/entity.rs | 60 +- crates/bevy_ui/src/flex/mod.rs | 2 +- crates/bevy_ui/src/focus.rs | 16 +- crates/bevy_ui/src/lib.rs | 6 +- crates/bevy_ui/src/render/camera.rs | 18 - crates/bevy_ui/src/render/mod.rs | 163 +- crates/bevy_ui/src/render/pipeline.rs | 7 +- crates/bevy_ui/src/render/render_pass.rs | 68 +- crates/bevy_ui/src/ui_node.rs | 6 +- crates/bevy_ui/src/widget/image.rs | 4 +- crates/bevy_ui/src/widget/text.rs | 7 +- crates/bevy_utils/Cargo.toml | 5 +- crates/bevy_window/src/cursor.rs | 44 +- crates/bevy_window/src/event.rs | 12 +- crates/bevy_window/src/lib.rs | 5 +- crates/bevy_window/src/raw_window_handle.rs | 33 +- crates/bevy_window/src/window.rs | 246 ++- crates/bevy_window/src/windows.rs | 10 +- crates/bevy_winit/Cargo.toml | 1 + crates/bevy_winit/src/lib.rs | 67 +- crates/bevy_winit/src/web_resize.rs | 83 + crates/bevy_winit/src/winit_config.rs | 14 +- crates/bevy_winit/src/winit_windows.rs | 3 +- deny.toml | 8 +- docs/linux_dependencies.md | 19 +- docs/plugins_guidelines.md | 2 +- docs/profiling.md | 10 +- docs/release_checklist.md | 4 +- examples/2d/mesh2d.rs | 2 +- examples/2d/mesh2d_manual.rs | 16 +- examples/2d/mesh2d_vertex_color_texture.rs | 40 + examples/2d/move_sprite.rs | 2 +- examples/2d/rotation.rs | 4 +- examples/2d/shapes.rs | 2 +- examples/2d/sprite.rs | 2 +- examples/2d/sprite_flipping.rs | 2 +- examples/2d/sprite_sheet.rs | 5 +- examples/2d/text2d.rs | 2 +- 
examples/2d/texture_atlas.rs | 10 +- examples/2d/transparency_2d.rs | 41 + examples/3d/3d_scene.rs | 3 +- examples/3d/lighting.rs | 2 +- examples/3d/load_gltf.rs | 7 +- examples/3d/msaa.rs | 2 +- examples/3d/orthographic.rs | 19 +- examples/3d/parenting.rs | 3 +- examples/3d/pbr.rs | 9 +- examples/3d/render_to_texture.rs | 122 +- examples/3d/shadow_biases.rs | 2 +- examples/3d/shadow_caster_receiver.rs | 2 +- examples/3d/shapes.rs | 15 +- examples/3d/spherical_area_lights.rs | 2 +- examples/3d/split_screen.rs | 112 + examples/3d/texture.rs | 2 +- examples/3d/transparency_3d.rs | 98 + examples/3d/two_passes.rs | 244 +-- examples/3d/update_gltf_scene.rs | 100 +- examples/3d/vertex_colors.rs | 3 +- examples/3d/wireframe.rs | 3 +- examples/README.md | 41 +- examples/android/android.rs | 3 +- examples/animation/animated_fox.rs | 10 +- examples/animation/animated_transform.rs | 2 +- examples/animation/custom_skinned_mesh.rs | 6 +- examples/animation/gltf_skinned_mesh.rs | 9 +- examples/app/without_winit.rs | 2 +- examples/asset/asset_loading.rs | 3 +- examples/asset/custom_asset_io.rs | 8 +- examples/asset/hot_asset_reloading.rs | 7 +- examples/async_tasks/async_compute.rs | 6 +- .../external_source_external_thread.rs | 2 +- examples/audio/audio_control.rs | 4 +- examples/diagnostics/custom_diagnostic.rs | 2 +- examples/ecs/ecs_guide.rs | 2 +- examples/ecs/fixed_timestep.rs | 2 +- examples/ecs/hierarchy.rs | 2 +- examples/ecs/iter_combinations.rs | 5 +- examples/ecs/parallel_query.rs | 18 +- examples/ecs/removal_detection.rs | 2 +- examples/ecs/state.rs | 8 +- examples/games/alien_cake_addict.rs | 56 +- examples/games/breakout.rs | 17 +- examples/games/contributors.rs | 3 +- examples/games/game_menu.rs | 8 +- examples/ios/src/lib.rs | 66 +- examples/reflection/reflection_types.rs | 1 + examples/scene/scene.rs | 18 +- examples/shader/animate_shader.rs | 9 +- .../shader/compute_shader_game_of_life.rs | 27 +- examples/shader/custom_vertex_attribute.rs | 18 +- examples/shader/post_processing.rs | 257 +++ examples/shader/shader_defs.rs | 9 +- examples/shader/shader_instancing.rs | 13 +- examples/shader/shader_material.rs | 19 +- examples/shader/shader_material_glsl.rs | 18 +- .../shader_material_screenspace_texture.rs | 8 +- examples/stress_tests/bevymark.rs | 5 +- examples/stress_tests/many_cubes.rs | 4 +- examples/stress_tests/many_foxes.rs | 21 +- examples/stress_tests/many_lights.rs | 27 +- examples/stress_tests/many_sprites.rs | 2 +- examples/stress_tests/transform_hierarchy.rs | 4 +- examples/tools/scene_viewer.rs | 261 +-- examples/transforms/3d_rotation.rs | 8 +- .../transforms/global_vs_local_translation.rs | 19 +- examples/transforms/scale.rs | 8 +- examples/transforms/transform.rs | 10 +- examples/transforms/translation.rs | 8 +- examples/ui/button.rs | 2 +- examples/ui/font_atlas_debug.rs | 2 +- examples/ui/text.rs | 2 +- examples/ui/text_debug.rs | 2 +- examples/ui/transparency_ui.rs | 76 + examples/ui/ui.rs | 4 +- examples/window/clear_color.rs | 2 +- examples/window/low_power.rs | 11 +- examples/window/multiple_windows.rs | 108 +- examples/window/scale_factor_override.rs | 4 +- examples/window/transparent_window.rs | 2 +- tests/how_to_test_systems.rs | 144 +- tests/window/minimising.rs | 17 +- tests/window/resizing.rs | 17 +- tools/build-wasm-example/Cargo.toml | 12 + tools/build-wasm-example/src/main.rs | 67 + tools/ci/Cargo.toml | 2 +- tools/ci/src/main.rs | 48 +- tools/publish.sh | 30 +- tools/spancmp/Cargo.toml | 2 +- tools/spancmp/src/main.rs | 8 +- 414 files changed, 15638 
insertions(+), 10339 deletions(-) delete mode 100644 .github/linters/markdown-link-check.json create mode 100644 .github/start-wasm-example/.gitignore create mode 100644 .github/start-wasm-example/package-lock.json create mode 100644 .github/start-wasm-example/package.json create mode 100644 .github/start-wasm-example/playwright.config.ts create mode 100644 .github/start-wasm-example/tests/wasm_example.spec.ts delete mode 100644 .github/workflows/ios.yml create mode 100644 .github/workflows/post-release.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/validation-jobs.yml delete mode 100644 LICENSE rename docs/LICENSE-APACHE => LICENSE-APACHE (100%) rename docs/LICENSE-MIT => LICENSE-MIT (100%) create mode 100644 assets/shaders/custom_material_chromatic_aberration.wgsl create mode 100644 benches/benches/bevy_ecs/schedule.rs delete mode 100644 crates/bevy_core/src/time/mod.rs create mode 100644 crates/bevy_core_pipeline/src/clear_color.rs delete mode 100644 crates/bevy_core_pipeline/src/clear_pass.rs delete mode 100644 crates/bevy_core_pipeline/src/clear_pass_driver.rs create mode 100644 crates/bevy_core_pipeline/src/core_2d/camera_2d.rs create mode 100644 crates/bevy_core_pipeline/src/core_2d/main_pass_2d_node.rs create mode 100644 crates/bevy_core_pipeline/src/core_2d/mod.rs create mode 100644 crates/bevy_core_pipeline/src/core_3d/camera_3d.rs rename crates/bevy_core_pipeline/src/{main_pass_3d.rs => core_3d/main_pass_3d_node.rs} (75%) create mode 100644 crates/bevy_core_pipeline/src/core_3d/mod.rs delete mode 100644 crates/bevy_core_pipeline/src/main_pass_2d.rs delete mode 100644 crates/bevy_core_pipeline/src/main_pass_driver.rs delete mode 100644 crates/bevy_crevice/Cargo.toml delete mode 100644 crates/bevy_crevice/LICENSE-APACHE delete mode 100644 crates/bevy_crevice/LICENSE-MIT delete mode 100644 crates/bevy_crevice/README.md delete mode 100644 crates/bevy_crevice/README.tpl delete mode 100644 crates/bevy_crevice/bevy-crevice-derive/Cargo.toml delete mode 100644 crates/bevy_crevice/bevy-crevice-derive/src/glsl.rs delete mode 100644 crates/bevy_crevice/bevy-crevice-derive/src/layout.rs delete mode 100644 crates/bevy_crevice/bevy-crevice-derive/src/lib.rs delete mode 100644 crates/bevy_crevice/crevice-tests/Cargo.toml delete mode 100644 crates/bevy_crevice/crevice-tests/src/gpu.rs delete mode 100644 crates/bevy_crevice/crevice-tests/src/lib.rs delete mode 100644 crates/bevy_crevice/crevice-tests/src/util.rs delete mode 100644 crates/bevy_crevice/src/glsl.rs delete mode 100644 crates/bevy_crevice/src/imp.rs delete mode 100644 crates/bevy_crevice/src/imp/imp_cgmath.rs delete mode 100644 crates/bevy_crevice/src/imp/imp_glam.rs delete mode 100644 crates/bevy_crevice/src/imp/imp_mint.rs delete mode 100644 crates/bevy_crevice/src/imp/imp_nalgebra.rs delete mode 100644 crates/bevy_crevice/src/internal.rs delete mode 100644 crates/bevy_crevice/src/lib.rs delete mode 100644 crates/bevy_crevice/src/std140.rs delete mode 100644 crates/bevy_crevice/src/std140/dynamic_uniform.rs delete mode 100644 crates/bevy_crevice/src/std140/primitives.rs delete mode 100644 crates/bevy_crevice/src/std140/sizer.rs delete mode 100644 crates/bevy_crevice/src/std140/traits.rs delete mode 100644 crates/bevy_crevice/src/std140/writer.rs delete mode 100644 crates/bevy_crevice/src/std430.rs delete mode 100644 crates/bevy_crevice/src/std430/primitives.rs delete mode 100644 crates/bevy_crevice/src/std430/sizer.rs delete mode 100644 crates/bevy_crevice/src/std430/traits.rs delete mode 
100644 crates/bevy_crevice/src/std430/writer.rs delete mode 100644 crates/bevy_crevice/src/util.rs delete mode 100644 crates/bevy_crevice/tests/snapshots/test__generate_struct_array_glsl.snap delete mode 100644 crates/bevy_crevice/tests/snapshots/test__generate_struct_glsl.snap delete mode 100644 crates/bevy_crevice/tests/test.rs create mode 100644 crates/bevy_ecs/src/system/commands/parallel_scope.rs create mode 100644 crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.rs create mode 100644 crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.stderr create mode 100644 crates/bevy_encase_derive/Cargo.toml create mode 100644 crates/bevy_encase_derive/src/lib.rs create mode 100644 crates/bevy_mikktspace/Cargo.toml create mode 100644 crates/bevy_mikktspace/LICENSE-APACHE create mode 100644 crates/bevy_mikktspace/LICENSE-MIT create mode 100644 crates/bevy_mikktspace/README.md create mode 100644 crates/bevy_mikktspace/examples/cube.obj create mode 100644 crates/bevy_mikktspace/examples/generate.rs create mode 100644 crates/bevy_mikktspace/src/generated.rs create mode 100644 crates/bevy_mikktspace/src/lib.rs create mode 100644 crates/bevy_mikktspace/tests/regression_test.rs create mode 100644 crates/bevy_pbr/src/render/clustered_forward.wgsl create mode 100644 crates/bevy_pbr/src/render/mesh_bindings.wgsl create mode 100644 crates/bevy_pbr/src/render/mesh_functions.wgsl rename crates/bevy_pbr/src/render/{mesh_struct.wgsl => mesh_types.wgsl} (88%) create mode 100644 crates/bevy_pbr/src/render/mesh_view_bindings.wgsl rename crates/bevy_pbr/src/render/{mesh_view_bind_group.wgsl => mesh_view_types.wgsl} (67%) create mode 100644 crates/bevy_pbr/src/render/pbr_bindings.wgsl create mode 100644 crates/bevy_pbr/src/render/pbr_functions.wgsl create mode 100644 crates/bevy_pbr/src/render/pbr_lighting.wgsl create mode 100644 crates/bevy_pbr/src/render/pbr_types.wgsl create mode 100644 crates/bevy_pbr/src/render/shadows.wgsl create mode 100644 crates/bevy_pbr/src/render/utils.wgsl create mode 100644 crates/bevy_reflect/src/fields.rs create mode 100644 crates/bevy_reflect/src/type_info.rs create mode 100644 crates/bevy_reflect/src/utility.rs create mode 100644 crates/bevy_render/macros/Cargo.toml create mode 100644 crates/bevy_render/macros/src/extract_resource.rs create mode 100644 crates/bevy_render/macros/src/lib.rs delete mode 100644 crates/bevy_render/src/camera/bundle.rs create mode 100644 crates/bevy_render/src/camera/camera_driver_node.rs rename crates/bevy_render/src/{render_component.rs => extract_component.rs} (90%) create mode 100644 crates/bevy_render/src/extract_resource.rs create mode 100644 crates/bevy_render/src/render_resource/uniform_buffer.rs delete mode 100644 crates/bevy_render/src/render_resource/uniform_vec.rs create mode 100644 crates/bevy_scene/src/bundle.rs delete mode 100644 crates/bevy_scene/src/command.rs create mode 100644 crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl create mode 100644 crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl rename crates/bevy_sprite/src/mesh2d/{mesh2d_struct.wgsl => mesh2d_types.wgsl} (81%) create mode 100644 crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl rename crates/bevy_sprite/src/mesh2d/{mesh2d_view_bind_group.wgsl => mesh2d_view_types.wgsl} (77%) create mode 100644 crates/bevy_time/Cargo.toml rename crates/{bevy_core/src/time => bevy_time/src}/fixed_timestep.rs (100%) create mode 100644 crates/bevy_time/src/lib.rs rename crates/{bevy_core/src/time => 
bevy_time/src}/stopwatch.rs (94%) rename crates/{bevy_core/src/time => bevy_time/src}/time.rs (96%) rename crates/{bevy_core/src/time => bevy_time/src}/timer.rs (97%) delete mode 100644 crates/bevy_ui/src/render/camera.rs create mode 100644 crates/bevy_winit/src/web_resize.rs create mode 100644 examples/2d/mesh2d_vertex_color_texture.rs create mode 100644 examples/2d/transparency_2d.rs create mode 100644 examples/3d/split_screen.rs create mode 100644 examples/3d/transparency_3d.rs create mode 100644 examples/shader/post_processing.rs create mode 100644 examples/ui/transparency_ui.rs create mode 100644 tools/build-wasm-example/Cargo.toml create mode 100644 tools/build-wasm-example/src/main.rs diff --git a/.cargo/config_fast_builds b/.cargo/config_fast_builds index 4d1ff9c1e3b38..cfbcd697c8f03 100644 --- a/.cargo/config_fast_builds +++ b/.cargo/config_fast_builds @@ -10,7 +10,7 @@ rustflags = ["-Clink-arg=-fuse-ld=lld", "-Zshare-generics=y"] # NOTE: you must manually install https://github.com/michaeleisel/zld on mac. you can easily do this with the "brew" package manager: # `brew install michaeleisel/zld/zld` [target.x86_64-apple-darwin] -rustflags = ["-C", "link-arg=-fuse-ld=/opt/homebrew/bin/zld", "-Zshare-generics=y"] +rustflags = ["-C", "link-arg=-fuse-ld=/usr/local/bin/zld", "-Zshare-generics=y"] [target.aarch64-apple-darwin] rustflags = ["-C", "link-arg=-fuse-ld=/opt/homebrew/bin/zld", "-Zshare-generics=y"] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 7ce131577cf2d..432363bdcc764 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -10,23 +10,46 @@ assignees: '' The release number or commit hash of the version you're using. -## Operating system & version +## \[Optional\] Relevant system information -Ex: Windows 10, Ubuntu 18.04, iOS 14. +If you cannot get Bevy to build or run on your machine, please include: -## What you did +- the Rust version you're using (you can get this by running `cargo --version`) + - Bevy relies on the "latest stable release" of Rust + - nightly should generally work, but there are sometimes regressions: please let us know! +- the operating system or browser used, including its version + - e.g. Windows 10, Ubuntu 18.04, iOS 14 + +If your bug is rendering-related, copy the adapter info that appears when you run Bevy. -The steps you took to uncover this bug. Please list full reproduction steps if -feasible. +```ignore +`AdapterInfo { name: "NVIDIA GeForce RTX 2070", vendor: 4318, device: 7938, device_type: DiscreteGpu, backend: Vulkan }` +``` -## What you expected to happen +You should also consider testing the examples of our upstream dependencies to help isolate any setup-specific issue: -What you think should've happened if everything was working properly. +- [`wgpu`](https://github.com/gfx-rs/wgpu) for rendering problems +- [`winit`](https://github.com/rust-windowing/winit) for input and window management +- [`gilrs`](https://docs.rs/gilrs/latest/gilrs/) for gamepad inputs + +## What you did -## What actually happened +Describe how you arrived at the problem. If you can, consider providing a code snippet or link. -The actual result of the actions you described. +## What went wrong + +If it's not clear, break this out into: + +- what were you expecting? +- what actually happened? ## Additional information -Any additional information you would like to add such as screenshots, logs, etc. +Other information that can be used to further reproduce or isolate the problem. 
+This commonly includes: + +- screenshots +- logs +- theories about what might be going wrong +- workarounds that you used +- links to related bugs, PRs or discussions diff --git a/.github/bors.toml b/.github/bors.toml index 9931de1ec54c6..d1735c98b9603 100644 --- a/.github/bors.toml +++ b/.github/bors.toml @@ -7,14 +7,16 @@ status = [ "build-wasm (nightly, ubuntu-latest)", "build-android", "markdownlint", - "check-markdown-links", "run-examples", + "run-examples-on-wasm", "check-doc", "check-missing-examples-in-docs", "check-unused-dependencies", "ci", "miri", "check-compiles", + "build-and-install-on-iOS", + "run-examples-on-windows", ] use_squash_merge = true diff --git a/.github/linters/markdown-link-check.json b/.github/linters/markdown-link-check.json deleted file mode 100644 index 9e2a95efc3ff6..0000000000000 --- a/.github/linters/markdown-link-check.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https?://github\\.com/" - }, - { - "pattern": "^https?://docs\\.github\\.com/" - }, - { - "pattern": "^https?://reddit\\.com/" - } - ], - "replacementPatterns": [], - "httpHeaders": [ - { - "urls": ["https://crates.io"], - "headers": { - "Accept": "text/html" - } - } - ], - "timeout": "20s", - "retryOn429": true, - "retryCount": 5, - "fallbackRetryDelay": "30s", - "aliveStatusCodes": [200, 206] -} diff --git a/.github/start-wasm-example/.gitignore b/.github/start-wasm-example/.gitignore new file mode 100644 index 0000000000000..75e854d8dcf7a --- /dev/null +++ b/.github/start-wasm-example/.gitignore @@ -0,0 +1,4 @@ +node_modules/ +/test-results/ +/playwright-report/ +/playwright/.cache/ diff --git a/.github/start-wasm-example/package-lock.json b/.github/start-wasm-example/package-lock.json new file mode 100644 index 0000000000000..4a39b73a25660 --- /dev/null +++ b/.github/start-wasm-example/package-lock.json @@ -0,0 +1,76 @@ +{ + "name": "start-wasm-example", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "start-wasm-example", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "dotenv": "^16.0.1" + }, + "devDependencies": { + "@playwright/test": "^1.22.1" + } + }, + "node_modules/@playwright/test": { + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.22.1.tgz", + "integrity": "sha512-8ouMBUboYslHom41W8bnSEn0TwlAMHhCACwOZeuiAgzukj7KobpZ+UBwrGE0jJ0UblJbKAQNRHXL+z7sDSkb6g==", + "dev": true, + "dependencies": { + "playwright-core": "1.22.1" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/dotenv": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.1.tgz", + "integrity": "sha512-1K6hR6wtk2FviQ4kEiSjFiH5rpzEVi8WW0x96aztHVMhEspNpc4DVOUTEHtEva5VThQ8IaBX1Pe4gSzpVVUsKQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/playwright-core": { + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.22.1.tgz", + "integrity": "sha512-H+ZUVYnceWNXrRf3oxTEKAr81QzFsCKu5Fp//fEjQvqgKkfA1iX3E9DBrPJpPNOrgVzcE+IqeI0fDmYJe6Ynnw==", + "dev": true, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=14" + } + } + }, + "dependencies": { + "@playwright/test": { + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.22.1.tgz", + "integrity": "sha512-8ouMBUboYslHom41W8bnSEn0TwlAMHhCACwOZeuiAgzukj7KobpZ+UBwrGE0jJ0UblJbKAQNRHXL+z7sDSkb6g==", + "dev": true, + "requires": { + "playwright-core": 
"1.22.1" + } + }, + "dotenv": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.1.tgz", + "integrity": "sha512-1K6hR6wtk2FviQ4kEiSjFiH5rpzEVi8WW0x96aztHVMhEspNpc4DVOUTEHtEva5VThQ8IaBX1Pe4gSzpVVUsKQ==" + }, + "playwright-core": { + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.22.1.tgz", + "integrity": "sha512-H+ZUVYnceWNXrRf3oxTEKAr81QzFsCKu5Fp//fEjQvqgKkfA1iX3E9DBrPJpPNOrgVzcE+IqeI0fDmYJe6Ynnw==", + "dev": true + } + } +} diff --git a/.github/start-wasm-example/package.json b/.github/start-wasm-example/package.json new file mode 100644 index 0000000000000..9e10e8134e657 --- /dev/null +++ b/.github/start-wasm-example/package.json @@ -0,0 +1,16 @@ +{ + "name": "start-wasm-example", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": {}, + "keywords": [], + "author": "", + "license": "ISC", + "devDependencies": { + "@playwright/test": "^1.22.1" + }, + "dependencies": { + "dotenv": "^16.0.1" + } +} diff --git a/.github/start-wasm-example/playwright.config.ts b/.github/start-wasm-example/playwright.config.ts new file mode 100644 index 0000000000000..f5988c74a2695 --- /dev/null +++ b/.github/start-wasm-example/playwright.config.ts @@ -0,0 +1,107 @@ +import type { PlaywrightTestConfig } from '@playwright/test'; +import { devices } from '@playwright/test'; + +/** + * Read environment variables from file. + * https://github.com/motdotla/dotenv + */ +require('dotenv').config(); + +/** + * See https://playwright.dev/docs/test-configuration. + */ +const config: PlaywrightTestConfig = { + testDir: './tests', + /* Maximum time one test can run for. */ + timeout: 300_000, + expect: { + /** + * Maximum time expect() should wait for the condition to be met. + * For example in `await expect(locator).toHaveText();` + */ + timeout: 5000 + }, + /* Run tests in files in parallel */ + fullyParallel: true, + /* Fail the build on CI if you accidentally left test.only in the source code. */ + forbidOnly: !!process.env.CI, + /* Retry on CI only */ + retries: 0, + /* Opt out of parallel tests on CI. */ + workers: 1, + /* Reporter to use. See https://playwright.dev/docs/test-reporters */ + reporter: 'list', + /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ + use: { + /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */ + actionTimeout: 0, + /* Base URL to use in actions like `await page.goto('/')`. */ + // baseURL: 'http://localhost:3000', + + /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */ + trace: 'on-first-retry', + }, + + /* Configure projects for major browsers */ + projects: [ + { + name: 'chromium', + use: { + ...devices['Desktop Chrome'], + }, + }, + + { + name: 'firefox', + use: { + ...devices['Desktop Firefox'], + }, + }, + + { + name: 'webkit', + use: { + ...devices['Desktop Safari'], + }, + }, + + /* Test against mobile viewports. */ + // { + // name: 'Mobile Chrome', + // use: { + // ...devices['Pixel 5'], + // }, + // }, + // { + // name: 'Mobile Safari', + // use: { + // ...devices['iPhone 12'], + // }, + // }, + + /* Test against branded browsers. */ + // { + // name: 'Microsoft Edge', + // use: { + // channel: 'msedge', + // }, + // }, + // { + // name: 'Google Chrome', + // use: { + // channel: 'chrome', + // }, + // }, + ], + + /* Folder for test artifacts such as screenshots, videos, traces, etc. 
*/ + // outputDir: 'test-results/', + + /* Run your local dev server before starting the tests */ + // webServer: { + // command: 'npm run start', + // port: 3000, + // }, +}; + +export default config; diff --git a/.github/start-wasm-example/tests/wasm_example.spec.ts b/.github/start-wasm-example/tests/wasm_example.spec.ts new file mode 100644 index 0000000000000..32bb4d9d2c6e3 --- /dev/null +++ b/.github/start-wasm-example/tests/wasm_example.spec.ts @@ -0,0 +1,51 @@ +import { test, expect, Page } from '@playwright/test'; + +test.beforeEach(async ({ page }) => { + await page.goto('http://localhost:8000/'); +}); + +const MAX_TIMEOUT_FOR_TEST = 300_000; + +test.describe('WASM example', () => { + test('Wait for success', async ({ page }, test_info) => { + let start = new Date().getTime(); + + let found = false; + while (new Date().getTime() - start < MAX_TIMEOUT_FOR_TEST) { + let msg = await promise_with_timeout(100, on_console(page), "no log found"); + if (msg.includes("no log found")) { + continue; + } + console.log(msg); + if (msg.includes("Test successful")) { + let prefix = process.env.SCREENSHOT_PREFIX === undefined ? "screenshot" : process.env.SCREENSHOT_PREFIX; + await page.screenshot({ path: `${prefix}-${test_info.project.name}.png`, fullPage: true }); + found = true; + break; + } + } + + expect(found).toBe(true); + }); + +}); + +function on_console(page) { + return new Promise(resolve => { + page.on('console', msg => resolve(msg.text())); + }); +} + +async function promise_with_timeout(time_limit, task, failure_value) { + let timeout; + const timeout_promise = new Promise((resolve, reject) => { + timeout = setTimeout(() => { + resolve(failure_value); + }, time_limit); + }); + const response = await Promise.race([task, timeout_promise]); + if (timeout) { + clearTimeout(timeout); + } + return response; +} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2f7da5fa767ab..25b6b8766a67f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,18 +76,20 @@ jobs: timeout-minutes: 60 steps: - uses: actions/checkout@v3 - - uses: actions/cache@v3 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-miri-${{ hashFiles('**/Cargo.toml') }} + # TODO: re-enable cache once nightly is unpinned + # - uses: actions/cache@v3 + # with: + # path: | + # ~/.cargo/bin/ + # ~/.cargo/registry/index/ + # ~/.cargo/registry/cache/ + # ~/.cargo/git/db/ + # target/ + # key: ${{ runner.os }}-cargo-miri-${{ hashFiles('**/Cargo.toml') }} - uses: actions-rs/toolchain@v1 with: - toolchain: nightly + # TODO: check again with nightly once https://github.com/rust-lang/miri/issues/2223 is fixed + toolchain: nightly-2022-06-08 components: miri override: true - name: Install alsa and udev @@ -135,6 +137,7 @@ jobs: toolchain: [stable, nightly] os: [ubuntu-latest] runs-on: ${{ matrix.os }} + needs: build steps: - uses: actions/checkout@v3 - uses: actions/cache@v3 @@ -157,31 +160,6 @@ jobs: command: check args: --target wasm32-unknown-unknown --no-default-features --features bevy_winit,x11,hdr,bevy_gltf - build-android: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - uses: actions/cache@v3 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-build-android-${{ hashFiles('**/Cargo.toml') }} - - 
name: Uninstall android-31 - run: $ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager --uninstall "platforms;android-31" - - name: Install Android targets - run: rustup target add aarch64-linux-android armv7-linux-androideabi - - name: Install Cargo APK - run: cargo install --force cargo-apk - - name: Build APK - run: cargo apk build --example android - markdownlint: runs-on: ubuntu-latest needs: check-missing-examples-in-docs @@ -200,57 +178,9 @@ jobs: # Not needed here as only one Linter is used. #GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - check-markdown-links: - runs-on: ubuntu-latest - needs: markdownlint - if: always() - steps: - - uses: actions/checkout@v3 - - name: check dead links - continue-on-error: true - id: run1 - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 - with: - use-quiet-mode: 'yes' - use-verbose-mode: 'yes' - config-file: '.github/linters/markdown-link-check.json' - - name: Sleep for 30 seconds - if: steps.run1.outcome=='failure' - run: sleep 30s - shell: bash - - name: check dead links (retry) - continue-on-error: true - id: run2 - if: steps.run1.outcome=='failure' - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 - with: - use-quiet-mode: 'yes' - use-verbose-mode: 'yes' - config-file: '.github/linters/markdown-link-check.json' - - name: Sleep for 30 seconds - if: steps.run2.outcome=='failure' - run: sleep 30s - shell: bash - - name: check dead links (retry 2) - continue-on-error: true - id: run3 - if: steps.run2.outcome=='failure' - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 - with: - use-quiet-mode: 'yes' - use-verbose-mode: 'yes' - config-file: '.github/linters/markdown-link-check.json' - - name: set the status - if: always() - run: | - if ${{ steps.run1.outcome=='success' || steps.run2.outcome=='success' || steps.run3.outcome=='success' }}; then - echo success - else - exit 1 - fi - run-examples: runs-on: ubuntu-latest + timeout-minutes: 30 steps: - name: Install Bevy dependencies run: | @@ -278,13 +208,13 @@ jobs: toolchain: stable - name: Build bevy run: | - cargo build --no-default-features --features "bevy_dynamic_plugin,bevy_gilrs,bevy_gltf,bevy_winit,render,png,hdr,x11,bevy_ci_testing,trace,trace_chrome,bevy_audio,vorbis" + cargo build --features "bevy_ci_testing,trace,trace_chrome" - name: Run examples run: | for example in .github/example-run/*.ron; do example_name=`basename $example .ron` echo "running $example_name - "`date` - time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --no-default-features --features "bevy_dynamic_plugin,bevy_gilrs,bevy_gltf,bevy_winit,render,png,hdr,x11,bevy_ci_testing,trace,trace_chrome,bevy_audio,vorbis" + time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" sleep 10 done zip traces.zip trace*.json diff --git a/.github/workflows/ios.yml b/.github/workflows/ios.yml deleted file mode 100644 index 7cc954acf9c80..0000000000000 --- a/.github/workflows/ios.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: iOS cron CI - -on: - schedule: - - cron: "0 0 * * *" - -env: - CARGO_TERM_COLOR: always - -jobs: - build: - runs-on: macos-latest - steps: - - uses: actions/checkout@v3 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - uses: actions/cache@v3 - with: - path: | - target - key: ${{ 
runner.os }}-cargo-check-test-${{ matrix.toolchain }}-${{ hashFiles('**/Cargo.lock') }} - - - name: Add iOS targets - run: rustup target add aarch64-apple-ios x86_64-apple-ios - - - name: Build and install iOS app in iOS Simulator. - run: cd examples/ios && make install diff --git a/.github/workflows/post-release.yml b/.github/workflows/post-release.yml new file mode 100644 index 0000000000000..4cc3ca9bde68d --- /dev/null +++ b/.github/workflows/post-release.yml @@ -0,0 +1,59 @@ +name: Post-release version bump + +# how to trigger: https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow +on: + workflow_dispatch: + +env: + CARGO_TERM_COLOR: always + +jobs: + ci: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install cargo-release + run: cargo install cargo-release + + - name: Setup post-release version bump + run: | + # Set the commit author to the github-actions bot. See discussion here for more information: + # https://github.com/actions/checkout/issues/13#issuecomment-724415212 + # https://github.community/t/github-actions-bot-email-address/17204/6 + git config user.name 'Bevy Auto Releaser' + git config user.email '41898282+github-actions[bot]@users.noreply.github.com' + # Read the current version from Cargo.toml + current_version=$(cargo metadata --format-version 1 --no-deps | \ + jq --raw-output '.packages | .[] | select(.name == "bevy").version') + # Sanity check: current version should be 0.X.Y + if ! grep -q '^0\.[0-9]\+\.[0-9]\+$' <<< "${current_version}"; then + echo "Invalid version (not in 0.X.Y format): ${current_version}" + exit 1 + fi + minor_version=$(sed 's/^0\.\([0-9]\+\).*/\1/' <<< "${current_version}") + next_version=0.$((minor_version + 1)).0-dev + echo "Bumping version to ${next_version}" + # See release.yml for meaning of these arguments + cargo release "${next_version}" \ + --workspace \ + --no-publish \ + --execute \ + --no-tag \ + --no-confirm \ + --no-push \ + --exclude ci \ + --exclude errors \ + --exclude bevy-ios-example \ + --exclude spancmp \ + --exclude build-wasm-example + + - name: Create PR + uses: peter-evans/create-pull-request@v3 + with: + delete-branch: true + base: "main" + title: "Bump Version after Release" + body: | + Bump version after release + This PR has been auto-generated diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000000..d4702b33b5297 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,56 @@ +name: Release + +# how to trigger: https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow +on: + workflow_dispatch: + +env: + CARGO_TERM_COLOR: always + +jobs: + ci: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install cargo-release + run: cargo install cargo-release + + - name: Setup release + run: | + # Set the commit author to the github-actions bot. 
See discussion here for more information: + # https://github.com/actions/checkout/issues/13#issuecomment-724415212 + # https://github.community/t/github-actions-bot-email-address/17204/6 + git config user.name 'Bevy Auto Releaser' + git config user.email '41898282+github-actions[bot]@users.noreply.github.com' + # release: remove the dev suffix, like going from 0.X.0-dev to 0.X.0 + # --workspace: updating all crates in the workspace + # --no-publish: do not publish to crates.io + # --execute: not a dry run + # --no-tag: do not push tag for each new version + # --no-push: do not push the update commits + # --dependent-version upgrade: change 0.X.0-dev in internal dependencies to 0.X.0 + # --exclude: ignore those packages + cargo release release \ + --workspace \ + --no-publish \ + --execute \ + --no-tag \ + --no-confirm \ + --no-push \ + --dependent-version upgrade \ + --exclude ci \ + --exclude errors \ + --exclude bevy-ios-example \ + --exclude spancmp \ + --exclude build-wasm-example + + - name: Create PR + uses: peter-evans/create-pull-request@v3 + with: + delete-branch: true + base: "main" + title: "Preparing Next Release" + body: | + Preparing next release + This PR has been auto-generated diff --git a/.github/workflows/validation-jobs.yml b/.github/workflows/validation-jobs.yml new file mode 100644 index 0000000000000..ec060da4afb1e --- /dev/null +++ b/.github/workflows/validation-jobs.yml @@ -0,0 +1,157 @@ +name: validation jobs + +on: + push: + branches: + - staging + - trying + - main + +env: + CARGO_TERM_COLOR: always + +jobs: + build-and-install-on-iOS: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + + - uses: actions/cache@v3 + with: + path: | + target + key: ${{ runner.os }}-ios-install-${{ matrix.toolchain }}-${{ hashFiles('**/Cargo.lock') }} + + - name: Add iOS targets + run: rustup target add aarch64-apple-ios x86_64-apple-ios + + - name: Build and install iOS app in iOS Simulator. 
+ run: cd examples/ios && make install + + build-android: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + + - uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-build-android-${{ hashFiles('**/Cargo.toml') }} + + - name: Uninstall android-31 + run: $ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager --uninstall "platforms;android-31" + + - name: Install Android targets + run: rustup target add aarch64-linux-android armv7-linux-androideabi + + - name: Install Cargo APK + run: cargo install --force cargo-apk + + - name: Build APK + run: cargo apk build --example android + + run-examples-on-windows: + runs-on: windows-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v3 + + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + + - uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-windows-run-examples-${{ hashFiles('**/Cargo.toml') }} + + - name: Build bevy + run: | + cargo build --features "bevy_ci_testing" + + - name: Run examples + shell: bash + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo "running $example_name - "`date` + time CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing" + sleep 10 + done + + run-examples-on-wasm: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v3 + + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: wasm32-unknown-unknown + override: true + + - uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + ~/.github/start-wasm-example/node_modules + target/ + key: ${{ runner.os }}-wasm-run-examples-${{ hashFiles('**/Cargo.toml') }} + + - name: install xvfb, llvmpipe and lavapipe + run: | + sudo apt-get update -y -qq + sudo add-apt-repository ppa:oibaf/graphics-drivers -y + sudo apt-get update + sudo apt install -y xvfb libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + + - name: Install wasm-bindgen + run: cargo install --force wasm-bindgen-cli + + - name: Setup playwright + run: | + cd .github/start-wasm-example + npm install + npx playwright install --with-deps + cd ../.. + + - name: First WASM build + run: | + cargo build --release --example ui --target wasm32-unknown-unknown + + - name: Run examples + shell: bash + run: | + # start a webserver + python3 -m http.server --directory examples/wasm & + + xvfb-run cargo run -p build-wasm-example -- --browsers chromium --browsers firefox --frames 25 --test shapes lighting text_debug breakout + + - name: Save screenshots + uses: actions/upload-artifact@v3 + with: + name: screenshots + path: .github/start-wasm-example/screenshot-*.png diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 20541abdb77d7..e359130848da3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -74,9 +74,9 @@ He makes the final decision on both design and code changes within Bevy in order In practice, @cart serves as a shockingly accountable dictator: open to new ideas and to changing his mind in the face of compelling arguments or community consensus. Check out the next section for details on how this plays out. 
-[Bevy Org members](https://github.com/orgs/bevyengine/people) are contributors who have: +[Bevy Org members](https://github.com/orgs/bevyengine/people) are contributors who: -1. Have actively engaged with Bevy development +1. Have actively engaged with Bevy development. 2. Have demonstrated themselves to be polite and welcoming representatives of the project with an understanding of our goals and direction. 3. Have asked to join the Bevy Org. Reach out to @cart on Discord or email us at bevyengine@gmail.com if you are interested. Everyone is welcome to do this. We generally accept membership requests, so don't hesitate if you are interested! @@ -198,9 +198,9 @@ Check out our [plugin guidelines](https://github.com/bevyengine/bevy/blob/main/d ### Fixing bugs -Bugs in Bevy (or the associated website / book) are filed on the issue tracker using the [`bug`](https://github.com/bevyengine/bevy/issues?q=is%3Aissue+is%3Aopen+label%3Abug) label. +Bugs in Bevy (or the associated website / book) are filed on the issue tracker using the [`C-Bug`](https://github.com/bevyengine/bevy/issues?q=is%3Aissue+is%3Aopen+label%3AC-Bug) label. -If you're looking for an easy place to start, take a look at the [`E-Good-First-Issue`](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AE-Good-First-Issue) label, and feel free to ask questions on that issue's thread in question or on Discord. +If you're looking for an easy place to start, take a look at the [`D-Good-First-Issue`](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AD-Good-First-Issue) label, and feel free to ask questions on that issue's thread in question or on Discord. You don't need anyone's permission to try fixing a bug or adding a simple feature, but stating that you'd like to tackle an issue can be helpful to avoid duplicated work. When you make a pull request that fixes an issue, include a line that says `Fixes #X` (or "Closes"), where `X` is the issue number. 
diff --git a/Cargo.toml b/Cargo.toml index 0c8a9a5d0441d..ab114c9d000e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ version = "0.8.0-dev" edition = "2021" categories = ["game-engines", "graphics", "gui", "rendering"] description = "A refreshingly simple data-driven game engine and app framework" -exclude = ["assets/**/*", "tools/**/*", ".github/**/*", "crates/**/*"] +exclude = ["assets/", "tools/", ".github/", "crates/", "examples/wasm/assets/"] homepage = "https://bevyengine.org" keywords = ["game", "engine", "gamedev", "graphics", "bevy"] license = "MIT OR Apache-2.0" @@ -13,7 +13,7 @@ repository = "https://github.com/bevyengine/bevy" [workspace] exclude = ["benches", "crates/bevy_ecs_compile_fail_tests"] -members = ["crates/*", "examples/ios", "tools/ci", "tools/spancmp", "errors"] +members = ["crates/*", "examples/ios", "tools/ci", "tools/spancmp", "tools/build-wasm-example", "errors"] [features] default = [ @@ -143,6 +143,10 @@ path = "examples/2d/mesh2d.rs" name = "mesh2d_manual" path = "examples/2d/mesh2d_manual.rs" +[[example]] +name = "mesh2d_vertex_color_texture" +path = "examples/2d/mesh2d_vertex_color_texture.rs" + [[example]] name = "shapes" path = "examples/2d/shapes.rs" @@ -167,11 +171,19 @@ path = "examples/2d/text2d.rs" name = "texture_atlas" path = "examples/2d/texture_atlas.rs" +[[example]] +name = "transparency_2d" +path = "examples/2d/transparency_2d.rs" + # 3D Rendering [[example]] name = "3d_scene" path = "examples/3d/3d_scene.rs" +[[example]] +name = "3d_shapes" +path = "examples/3d/shapes.rs" + [[example]] name = "lighting" path = "examples/3d/lighting.rs" @@ -208,10 +220,6 @@ path = "examples/3d/render_to_texture.rs" name = "shadow_biases" path = "examples/3d/shadow_biases.rs" -[[example]] -name = "3d_shapes" -path = "examples/3d/shapes.rs" - [[example]] name = "shadow_caster_receiver" path = "examples/3d/shadow_caster_receiver.rs" @@ -220,10 +228,18 @@ path = "examples/3d/shadow_caster_receiver.rs" name = "spherical_area_lights" path = "examples/3d/spherical_area_lights.rs" +[[example]] +name = "split_screen" +path = "examples/3d/split_screen.rs" + [[example]] name = "texture" path = "examples/3d/texture.rs" +[[example]] +name = "transparency_3d" +path = "examples/3d/transparency_3d.rs" + [[example]] name = "two_passes" path = "examples/3d/two_passes.rs" @@ -508,6 +524,10 @@ path = "examples/scene/scene.rs" name = "custom_vertex_attribute" path = "examples/shader/custom_vertex_attribute.rs" +[[example]] +name = "post_processing" +path = "examples/shader/post_processing.rs" + [[example]] name = "shader_defs" path = "examples/shader/shader_defs.rs" @@ -606,6 +626,10 @@ path = "examples/ui/text.rs" name = "text_debug" path = "examples/ui/text_debug.rs" +[[example]] +name = "transparency_ui" +path = "examples/ui/transparency_ui.rs" + [[example]] name = "ui" path = "examples/ui/ui.rs" diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 9035c3b21aa77..0000000000000 --- a/LICENSE +++ /dev/null @@ -1,6 +0,0 @@ -Bevy is dual-licensed under either - -* MIT License (docs/LICENSE-MIT or http://opensource.org/licenses/MIT) -* Apache License, Version 2.0 (docs/LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) - -at your option. 
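Editor's illustrative sketch (not part of the patch): the `[[example]]` entries registered in Cargo.toml above each point at a small binary of roughly the following shape, which the CI jobs in validation-jobs.yml run via `cargo run --example <name> --features "bevy_ci_testing"` with `CI_TESTING_CONFIG` set. The real examples each add their own scene-setup systems; this is only the minimal skeleton, under that assumption.

use bevy::prelude::*;

fn main() {
    // DefaultPlugins pulls in windowing, rendering, input, etc.
    // With the `bevy_ci_testing` feature enabled, the app also reads the
    // CI_TESTING_CONFIG .ron file and sends AppExit after the configured
    // number of frames, which is how the workflow jobs above terminate it.
    App::new().add_plugins(DefaultPlugins).run();
}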
diff --git a/docs/LICENSE-APACHE b/LICENSE-APACHE similarity index 100% rename from docs/LICENSE-APACHE rename to LICENSE-APACHE diff --git a/docs/LICENSE-MIT b/LICENSE-MIT similarity index 100% rename from docs/LICENSE-MIT rename to LICENSE-MIT diff --git a/README.md b/README.md index a5ca4d8180570..41c2f9f0ae8da 100644 --- a/README.md +++ b/README.md @@ -100,8 +100,8 @@ Additionally, we would like to thank the [Amethyst](https://github.com/amethyst/ Bevy is free and open source! All code in this repository is dual-licensed under either: -* MIT License ([LICENSE-MIT](docs/LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) -* Apache License, Version 2.0 ([LICENSE-APACHE](docs/LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) +* MIT License ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) +* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) at your option. This means you can select the license you prefer! This dual-licensing approach is the de-facto standard in the Rust ecosystem and there are [very good reasons](https://github.com/bevyengine/bevy/issues/2373) to include both. diff --git a/assets/shaders/animate_shader.wgsl b/assets/shaders/animate_shader.wgsl index fdc60da00b539..d71b30bfafe7c 100644 --- a/assets/shaders/animate_shader.wgsl +++ b/assets/shaders/animate_shader.wgsl @@ -1,9 +1,12 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_view_bindings [[group(1), binding(0)]] var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; [[location(1)]] normal: vec3; @@ -17,10 +20,8 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; + out.clip_position = mesh_position_local_to_clip(mesh.model, vec4(vertex.position, 1.0)); out.uv = vertex.uv; return out; } diff --git a/assets/shaders/custom_material_chromatic_aberration.wgsl b/assets/shaders/custom_material_chromatic_aberration.wgsl new file mode 100644 index 0000000000000..811cfb8810abc --- /dev/null +++ b/assets/shaders/custom_material_chromatic_aberration.wgsl @@ -0,0 +1,25 @@ +#import bevy_pbr::mesh_view_bindings + +[[group(1), binding(0)]] +var texture: texture_2d; + +[[group(1), binding(1)]] +var our_sampler: sampler; + + +[[stage(fragment)]] +fn fragment([[builtin(position)]] position: vec4) -> [[location(0)]] vec4 { + // Get screen position with coordinates from 0 to 1 + let uv = position.xy / vec2(view.width, view.height); + let offset_strength = 0.02; + + // Sample each color channel with an arbitrary shift + var output_color = vec4( + textureSample(texture, our_sampler, uv + vec2(offset_strength, -offset_strength)).r, + textureSample(texture, our_sampler, uv + vec2(-offset_strength, 0.0)).g, + textureSample(texture, our_sampler, uv + vec2(0.0, offset_strength)).b, + 1.0 + ); + + return output_color; +} diff --git a/assets/shaders/custom_material_screenspace_texture.wgsl b/assets/shaders/custom_material_screenspace_texture.wgsl index a1fa6998f20d2..2228b79083af9 100644 --- a/assets/shaders/custom_material_screenspace_texture.wgsl +++ b/assets/shaders/custom_material_screenspace_texture.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::mesh_view_bind_group +#import bevy_pbr::mesh_view_bindings [[group(1), binding(0)]] var texture: texture_2d; diff --git a/assets/shaders/custom_vertex_attribute.wgsl b/assets/shaders/custom_vertex_attribute.wgsl index 3e42b03d710ae..2def33a098602 100644 --- a/assets/shaders/custom_vertex_attribute.wgsl +++ b/assets/shaders/custom_vertex_attribute.wgsl @@ -1,10 +1,5 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct - -struct Vertex { - [[location(0)]] position: vec3; - [[location(1)]] blend_color: vec4; -}; +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_bindings struct CustomMaterial { color: vec4; @@ -12,8 +7,13 @@ struct CustomMaterial { [[group(1), binding(0)]] var material: CustomMaterial; -[[group(2), binding(0)]] -var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions + +struct Vertex { + [[location(0)]] position: vec3; + [[location(1)]] blend_color: vec4; +}; struct VertexOutput { [[builtin(position)]] clip_position: vec4; @@ -22,10 +22,8 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; + out.clip_position = mesh_position_local_to_clip(mesh.model, vec4(vertex.position, 1.0)); out.blend_color = vertex.blend_color; return out; } diff --git a/assets/shaders/instancing.wgsl b/assets/shaders/instancing.wgsl index 262df7224b887..c83a853b07842 100644 --- a/assets/shaders/instancing.wgsl +++ b/assets/shaders/instancing.wgsl @@ -1,9 +1,12 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_view_bindings [[group(1), binding(0)]] var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! +#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; [[location(1)]] normal: vec3; @@ -21,10 +24,8 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz; - let world_position = mesh.model * vec4(position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; + out.clip_position = mesh_position_local_to_clip(mesh.model, vec4(position, 1.0)); out.color = vertex.i_color; return out; } diff --git a/assets/shaders/shader_defs.wgsl b/assets/shaders/shader_defs.wgsl index 0d1c93d37e5ea..a26e988ed951a 100644 --- a/assets/shaders/shader_defs.wgsl +++ b/assets/shaders/shader_defs.wgsl @@ -1,9 +1,12 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_view_bindings [[group(1), binding(0)]] var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; [[location(1)]] normal: vec3; @@ -16,17 +19,15 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; + out.clip_position = mesh_position_local_to_clip(mesh.model, vec4(vertex.position, 1.0)); return out; } [[stage(fragment)]] fn fragment() -> [[location(0)]] vec4 { var color = vec4(0.0, 0.0, 1.0, 1.0); -# ifdef IS_RED +# ifdef IS_RED color = vec4(1.0, 0.0, 0.0, 1.0); # endif return color; diff --git a/benches/Cargo.toml b/benches/Cargo.toml index eeb69a2413e65..6ba2ad92809fe 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -11,6 +11,7 @@ glam = "0.20" rand = "0.8" rand_chacha = "0.3" criterion = { version = "0.3", features = ["html_reports"] } +bevy_app = { path = "../crates/bevy_app" } bevy_ecs = { path = "../crates/bevy_ecs" } bevy_reflect = { path = "../crates/bevy_reflect" } bevy_tasks = { path = "../crates/bevy_tasks" } @@ -46,6 +47,11 @@ name = "world_get" path = "benches/bevy_ecs/world_get.rs" harness = false +[[bench]] +name = "schedule" +path = "benches/bevy_ecs/schedule.rs" +harness = false + [[bench]] name = "reflect_list" path = "benches/bevy_reflect/list.rs" diff --git a/benches/benches/bevy_ecs/ecs_bench_suite/heavy_compute.rs b/benches/benches/bevy_ecs/ecs_bench_suite/heavy_compute.rs index 4ddae1781ea1b..169f32f06500e 100644 --- a/benches/benches/bevy_ecs/ecs_bench_suite/heavy_compute.rs +++ b/benches/benches/bevy_ecs/ecs_bench_suite/heavy_compute.rs @@ -1,5 +1,5 @@ use bevy_ecs::prelude::*; -use bevy_tasks::TaskPool; +use bevy_tasks::{ComputeTaskPool, TaskPool}; use glam::*; #[derive(Component, Copy, Clone)] @@ -18,6 +18,8 @@ pub struct Benchmark(World, Box>); impl Benchmark { pub fn new() -> Self { + ComputeTaskPool::init(TaskPool::default); + let mut world = World::default(); world.spawn_batch((0..1000).map(|_| { @@ -29,8 +31,8 @@ impl Benchmark { ) })); - fn sys(task_pool: Res, mut query: Query<(&mut Position, &mut Transform)>) { - query.par_for_each_mut(&task_pool, 128, |(mut pos, mut mat)| { + fn sys(mut query: Query<(&mut Position, &mut Transform)>) { + query.par_for_each_mut(128, |(mut pos, mut mat)| { for _ in 0..100 { mat.0 = mat.0.inverse(); } @@ -39,7 +41,6 @@ impl Benchmark { }); } - world.insert_resource(TaskPool::default()); let mut system = IntoSystem::into_system(sys); system.initialize(&mut world); system.update_archetype_component_access(&world); diff --git a/benches/benches/bevy_ecs/schedule.rs b/benches/benches/bevy_ecs/schedule.rs new file mode 100644 index 0000000000000..0b8916e98b1e8 --- /dev/null +++ b/benches/benches/bevy_ecs/schedule.rs @@ -0,0 +1,69 @@ +use bevy_app::App; +use bevy_ecs::prelude::*; +use criterion::{criterion_group, criterion_main, Criterion}; + +criterion_group!(benches, build_schedule); +criterion_main!(benches); + +fn build_schedule(criterion: &mut Criterion) { + // empty system + fn empty_system() {} + + // Use multiple different kinds of label to ensure that dynamic dispatch + // doesn't somehow get optimized away. 
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, SystemLabel)] + struct NumLabel(usize); + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, SystemLabel)] + struct DummyLabel; + + let mut group = criterion.benchmark_group("build_schedule"); + group.warm_up_time(std::time::Duration::from_millis(500)); + group.measurement_time(std::time::Duration::from_secs(15)); + + // Method: generate a set of `graph_size` systems which have a One True Ordering. + // Add system to the stage with full constraints. Hopefully this should be maximimally + // difficult for bevy to figure out. + let labels: Vec<_> = (0..1000).map(NumLabel).collect(); + + // Benchmark graphs of different sizes. + for graph_size in [100, 500, 1000] { + // Basic benchmark without constraints. + group.bench_function(format!("{graph_size}_schedule_noconstraints"), |bencher| { + bencher.iter(|| { + let mut app = App::new(); + for _ in 0..graph_size { + app.add_system(empty_system); + } + app.update(); + }); + }); + + // Benchmark with constraints. + group.bench_function(format!("{graph_size}_schedule"), |bencher| { + bencher.iter(|| { + let mut app = App::new(); + app.add_system(empty_system.label(DummyLabel)); + + // Build a fully-connected dependency graph describing the One True Ordering. + // Not particularly realistic but this can be refined later. + for i in 0..graph_size { + let mut sys = empty_system.label(labels[i]).before(DummyLabel); + for a in 0..i { + sys = sys.after(labels[a]); + } + for b in i + 1..graph_size { + sys = sys.before(labels[b]); + } + app.add_system(sys); + } + // Run the app for a single frame. + // This is necessary since dependency resolution does not occur until the game runs. + // FIXME: Running the game clutters up the benchmarks, so ideally we'd be + // able to benchmark the dependency resolution directly. 
+ app.update(); + }); + }); + } + + group.finish(); +} diff --git a/crates/bevy_animation/Cargo.toml b/crates/bevy_animation/Cargo.toml index fdb9bd164ce6b..2ef00a7c53c6c 100644 --- a/crates/bevy_animation/Cargo.toml +++ b/crates/bevy_animation/Cargo.toml @@ -15,6 +15,7 @@ bevy_asset = { path = "../bevy_asset", version = "0.8.0-dev" } bevy_core = { path = "../bevy_core", version = "0.8.0-dev" } bevy_math = { path = "../bevy_math", version = "0.8.0-dev" } bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["bevy"] } +bevy_time = { path = "../bevy_time", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.8.0-dev" } diff --git a/crates/bevy_animation/src/lib.rs b/crates/bevy_animation/src/lib.rs index c894207dcd71e..7882ad4d2f274 100644 --- a/crates/bevy_animation/src/lib.rs +++ b/crates/bevy_animation/src/lib.rs @@ -6,7 +6,7 @@ use std::ops::Deref; use bevy_app::{App, CoreStage, Plugin}; use bevy_asset::{AddAsset, Assets, Handle}; -use bevy_core::{Name, Time}; +use bevy_core::Name; use bevy_ecs::{ change_detection::DetectChanges, entity::Entity, @@ -18,6 +18,7 @@ use bevy_ecs::{ use bevy_hierarchy::{Children, HierarchySystem}; use bevy_math::{Quat, Vec3}; use bevy_reflect::{Reflect, TypeUuid}; +use bevy_time::Time; use bevy_transform::{prelude::Transform, TransformSystem}; use bevy_utils::{tracing::warn, HashMap}; @@ -229,7 +230,7 @@ pub fn animation_player( match &curve.keyframes { Keyframes::Rotation(keyframes) => transform.rotation = keyframes[0], Keyframes::Translation(keyframes) => { - transform.translation = keyframes[0] + transform.translation = keyframes[0]; } Keyframes::Scale(keyframes) => transform.scale = keyframes[0], } diff --git a/crates/bevy_app/Cargo.toml b/crates/bevy_app/Cargo.toml index 602dd466e798c..2dd25147e42f9 100644 --- a/crates/bevy_app/Cargo.toml +++ b/crates/bevy_app/Cargo.toml @@ -12,13 +12,15 @@ keywords = ["bevy"] trace = [] bevy_ci_testing = ["serde", "ron"] default = ["bevy_reflect"] +bevy_reflect = ["dep:bevy_reflect", "bevy_ecs/bevy_reflect"] [dependencies] # bevy bevy_derive = { path = "../bevy_derive", version = "0.8.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev", default-features = false } bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", optional = true } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.8.0-dev" } # other serde = { version = "1.0", features = ["derive"], optional = true } diff --git a/crates/bevy_app/src/ci_testing.rs b/crates/bevy_app/src/ci_testing.rs index b319f04a91750..66206d91095c3 100644 --- a/crates/bevy_app/src/ci_testing.rs +++ b/crates/bevy_app/src/ci_testing.rs @@ -1,6 +1,8 @@ use crate::{app::AppExit, App}; use serde::Deserialize; +use bevy_utils::tracing::info; + /// A configuration struct for automated CI testing. /// /// It gets used when the `bevy_ci_testing` feature is enabled to automatically @@ -20,18 +22,29 @@ fn ci_testing_exit_after( if let Some(exit_after) = ci_testing_config.exit_after { if *current_frame > exit_after { app_exit_events.send(AppExit); + info!("Exiting after {} frames. 
Test successful!", exit_after); } } *current_frame += 1; } pub(crate) fn setup_app(app: &mut App) -> &mut App { - let filename = - std::env::var("CI_TESTING_CONFIG").unwrap_or_else(|_| "ci_testing_config.ron".to_string()); - let config: CiTestingConfig = ron::from_str( - &std::fs::read_to_string(filename).expect("error reading CI testing configuration file"), - ) - .expect("error deserializing CI testing configuration file"); + #[cfg(not(target_arch = "wasm32"))] + let config: CiTestingConfig = { + let filename = std::env::var("CI_TESTING_CONFIG") + .unwrap_or_else(|_| "ci_testing_config.ron".to_string()); + ron::from_str( + &std::fs::read_to_string(filename) + .expect("error reading CI testing configuration file"), + ) + .expect("error deserializing CI testing configuration file") + }; + #[cfg(target_arch = "wasm32")] + let config: CiTestingConfig = { + let config = include_str!("../../../ci_testing_config.ron"); + ron::from_str(config).expect("error deserializing CI testing configuration file") + }; + app.insert_resource(config) .add_system(ci_testing_exit_after); diff --git a/crates/bevy_app/src/plugin_group.rs b/crates/bevy_app/src/plugin_group.rs index 75d450032aa7b..d707634fd8503 100644 --- a/crates/bevy_app/src/plugin_group.rs +++ b/crates/bevy_app/src/plugin_group.rs @@ -128,7 +128,7 @@ impl PluginGroupBuilder { /// Consumes the [`PluginGroupBuilder`] and [builds](Plugin::build) the contained [`Plugin`]s /// in the order specified. pub fn finish(self, app: &mut App) { - for ty in self.order.iter() { + for ty in &self.order { if let Some(entry) = self.plugins.get(ty) { if entry.enabled { debug!("added plugin: {}", entry.plugin.name()); @@ -173,7 +173,7 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } #[test] @@ -190,7 +190,7 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } #[test] @@ -207,7 +207,7 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } #[test] @@ -225,7 +225,7 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } #[test] @@ -243,7 +243,7 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } #[test] @@ -261,6 +261,6 @@ mod tests { std::any::TypeId::of::(), std::any::TypeId::of::(), ] - ) + ); } } diff --git a/crates/bevy_asset/Cargo.toml b/crates/bevy_asset/Cargo.toml index 550a5b0330e33..5ccf6ac63f5c9 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -40,7 +40,7 @@ wasm-bindgen-futures = "0.4" js-sys = "0.3" [target.'cfg(target_os = "android")'.dependencies] -ndk-glue = { version = "0.6" } +ndk-glue = { version = "0.5" } [dev-dependencies] futures-lite = "1.4.0" diff --git a/crates/bevy_asset/src/asset_server.rs b/crates/bevy_asset/src/asset_server.rs index 04d1f3bda07d4..bc7defc361c52 100644 --- a/crates/bevy_asset/src/asset_server.rs +++ b/crates/bevy_asset/src/asset_server.rs @@ -7,7 +7,7 @@ use crate::{ use anyhow::Result; use bevy_ecs::system::{Res, ResMut}; use bevy_log::warn; -use bevy_tasks::TaskPool; +use bevy_tasks::IoTaskPool; use bevy_utils::{Entry, HashMap, Uuid}; use crossbeam_channel::TryRecvError; use parking_lot::{Mutex, RwLock}; @@ -56,7 +56,6 @@ pub struct AssetServerInternal { loaders: RwLock>>, extension_to_loader_index: RwLock>, handle_to_path: Arc>>>, - task_pool: TaskPool, } /// Loads assets from the filesystem on background threads @@ -66,11 +65,11 @@ pub struct AssetServer { } impl AssetServer { - pub fn new(source_io: T, task_pool: TaskPool) -> Self { - 
Self::with_boxed_io(Box::new(source_io), task_pool) + pub fn new(source_io: T) -> Self { + Self::with_boxed_io(Box::new(source_io)) } - pub fn with_boxed_io(asset_io: Box, task_pool: TaskPool) -> Self { + pub fn with_boxed_io(asset_io: Box) -> Self { AssetServer { server: Arc::new(AssetServerInternal { loaders: Default::default(), @@ -79,7 +78,6 @@ impl AssetServer { asset_ref_counter: Default::default(), handle_to_path: Default::default(), asset_lifecycles: Default::default(), - task_pool, asset_io, }), } @@ -315,7 +313,6 @@ impl AssetServer { &self.server.asset_ref_counter.channel, self.asset_io(), version, - &self.server.task_pool, ); if let Err(err) = asset_loader @@ -377,8 +374,7 @@ impl AssetServer { pub(crate) fn load_untracked(&self, asset_path: AssetPath<'_>, force: bool) -> HandleId { let server = self.clone(); let owned_path = asset_path.to_owned(); - self.server - .task_pool + IoTaskPool::get() .spawn(async move { if let Err(err) = server.load_async(owned_path, force).await { warn!("{}", err); @@ -620,8 +616,8 @@ mod test { fn setup(asset_path: impl AsRef) -> AssetServer { use crate::FileAssetIo; - - AssetServer::new(FileAssetIo::new(asset_path, false), Default::default()) + IoTaskPool::init(Default::default); + AssetServer::new(FileAssetIo::new(asset_path, false)) } #[test] @@ -781,8 +777,11 @@ mod test { asset_server.get_handle_untyped(id) } - fn get_asset(id: impl Into, world: &World) -> Option<&PngAsset> { - world.resource::>().get(id.into()) + fn get_asset<'world>( + id: &Handle, + world: &'world World, + ) -> Option<&'world PngAsset> { + world.resource::>().get(id) } fn get_load_state(id: impl Into, world: &World) -> LoadState { @@ -800,7 +799,7 @@ mod test { ); // load the asset - let handle = load_asset(path.clone(), &app.world); + let handle = load_asset(path.clone(), &app.world).typed(); let weak_handle = handle.clone_weak(); // asset is loading @@ -826,7 +825,7 @@ mod test { assert!(get_asset(&weak_handle, &app.world).is_none()); // finally, reload the asset - let handle = load_asset(path.clone(), &app.world); + let handle = load_asset(path.clone(), &app.world).typed(); assert_eq!(LoadState::Loading, get_load_state(&handle, &app.world)); app.update(); assert_eq!(LoadState::Loaded, get_load_state(&handle, &app.world)); diff --git a/crates/bevy_asset/src/assets.rs b/crates/bevy_asset/src/assets.rs index 0a111ffbd8867..23ec2cf3dee66 100644 --- a/crates/bevy_asset/src/assets.rs +++ b/crates/bevy_asset/src/assets.rs @@ -129,12 +129,12 @@ impl Assets { /// /// This is the main method for accessing asset data from an [Assets] collection. If you need /// mutable access to the asset, use [`get_mut`](Assets::get_mut). - pub fn get>(&self, handle: H) -> Option<&T> { + pub fn get(&self, handle: &Handle) -> Option<&T> { self.assets.get(&handle.into()) } /// Checks if an asset exists for the given handle - pub fn contains>(&self, handle: H) -> bool { + pub fn contains(&self, handle: &Handle) -> bool { self.assets.contains_key(&handle.into()) } @@ -142,7 +142,7 @@ impl Assets { /// /// This is the main method for mutably accessing asset data from an [Assets] collection. If you /// do not need mutable access to the asset, you may also use [get](Assets::get). 
- pub fn get_mut>(&mut self, handle: H) -> Option<&mut T> { + pub fn get_mut(&mut self, handle: &Handle) -> Option<&mut T> { let id: HandleId = handle.into(); self.events.send(AssetEvent::Modified { handle: Handle::weak(id), @@ -398,6 +398,6 @@ mod tests { let handle = assets_before.add(MyAsset); app.add_asset::(); // Ensure this doesn't overwrite the Asset let assets_after = app.world.resource_mut::>(); - assert!(assets_after.get(handle).is_some()); + assert!(assets_after.get(&handle).is_some()); } } diff --git a/crates/bevy_asset/src/debug_asset_server.rs b/crates/bevy_asset/src/debug_asset_server.rs index d4970dabcc071..b7e3c71c85c89 100644 --- a/crates/bevy_asset/src/debug_asset_server.rs +++ b/crates/bevy_asset/src/debug_asset_server.rs @@ -58,14 +58,14 @@ impl Default for HandleMap { impl Plugin for DebugAssetServerPlugin { fn build(&self, app: &mut bevy_app::App) { + IoTaskPool::init(|| { + TaskPoolBuilder::default() + .num_threads(2) + .thread_name("Debug Asset Server IO Task Pool".to_string()) + .build() + }); let mut debug_asset_app = App::new(); debug_asset_app - .insert_resource(IoTaskPool( - TaskPoolBuilder::default() - .num_threads(2) - .thread_name("Debug Asset Server IO Task Pool".to_string()) - .build(), - )) .insert_resource(AssetServerSettings { asset_folder: "crates".to_string(), watch_for_changes: true, @@ -93,8 +93,7 @@ pub(crate) fn sync_debug_assets( let (changed_shaders, handle_map, debug_assets) = state.get_mut(world); for changed in changed_shaders.iter_current_update_events() { let debug_handle = match changed { - AssetEvent::Created { handle } => handle, - AssetEvent::Modified { handle } => handle, + AssetEvent::Created { handle } | AssetEvent::Modified { handle } => handle, AssetEvent::Removed { .. } => continue, }; if let Some(handle) = handle_map.handles.get(debug_handle) { diff --git a/crates/bevy_asset/src/diagnostic/asset_count_diagnostics_plugin.rs b/crates/bevy_asset/src/diagnostic/asset_count_diagnostics_plugin.rs index 8224edd3893da..d5f64c8a455e8 100644 --- a/crates/bevy_asset/src/diagnostic/asset_count_diagnostics_plugin.rs +++ b/crates/bevy_asset/src/diagnostic/asset_count_diagnostics_plugin.rs @@ -48,6 +48,6 @@ impl AssetCountDiagnosticsPlugin { } pub fn diagnostic_system(mut diagnostics: ResMut, assets: Res>) { - diagnostics.add_measurement(Self::diagnostic_id(), assets.len() as f64); + diagnostics.add_measurement(Self::diagnostic_id(), || assets.len() as f64); } } diff --git a/crates/bevy_asset/src/handle.rs b/crates/bevy_asset/src/handle.rs index 06fdee6907a37..e929371e2fdbc 100644 --- a/crates/bevy_asset/src/handle.rs +++ b/crates/bevy_asset/src/handle.rs @@ -10,7 +10,7 @@ use crate::{ Asset, Assets, }; use bevy_ecs::{component::Component, reflect::ReflectComponent}; -use bevy_reflect::{FromReflect, Reflect, ReflectDeserialize}; +use bevy_reflect::{FromReflect, Reflect, ReflectDeserialize, ReflectSerialize}; use bevy_utils::Uuid; use crossbeam_channel::{Receiver, Sender}; use serde::{Deserialize, Serialize}; @@ -169,7 +169,7 @@ impl Handle { /// Makes this handle Strong if it wasn't already. /// /// This method requires the corresponding [Assets](crate::Assets) collection - pub fn make_strong(&mut self, assets: &mut Assets) { + pub fn make_strong(&mut self, assets: &Assets) { if self.is_strong() { return; } @@ -341,6 +341,14 @@ impl HandleUntyped { matches!(self.handle_type, HandleType::Strong(_)) } + /// Create a weak typed [`Handle`] from this handle. 
+ /// + /// If this handle is strong and dropped, there is no guarantee that the asset + /// will still be available (if only the returned handle is kept) + pub fn typed_weak(&self) -> Handle { + self.clone_weak().typed() + } + /// Convert this handle into a typed [Handle]. /// /// The new handle will maintain the Strong or Weak status of the current handle. diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index 870f100d10306..b5ba1a1854d02 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -30,7 +30,6 @@ pub use path::*; use bevy_app::{prelude::Plugin, App}; use bevy_ecs::schedule::{StageLabel, SystemStage}; -use bevy_tasks::IoTaskPool; /// The names of asset stages in an App Schedule #[derive(Debug, Hash, PartialEq, Eq, Clone, StageLabel)] @@ -82,12 +81,8 @@ pub fn create_platform_default_asset_io(app: &mut App) -> Box { impl Plugin for AssetPlugin { fn build(&self, app: &mut App) { if !app.world.contains_resource::() { - let task_pool = app.world.resource::().0.clone(); - let source = create_platform_default_asset_io(app); - - let asset_server = AssetServer::with_boxed_io(source, task_pool); - + let asset_server = AssetServer::with_boxed_io(source); app.insert_resource(asset_server); } diff --git a/crates/bevy_asset/src/loader.rs b/crates/bevy_asset/src/loader.rs index 5a5de9b8c11eb..5d6b87d8388ba 100644 --- a/crates/bevy_asset/src/loader.rs +++ b/crates/bevy_asset/src/loader.rs @@ -5,7 +5,6 @@ use crate::{ use anyhow::Result; use bevy_ecs::system::{Res, ResMut}; use bevy_reflect::{TypeUuid, TypeUuidDynamic}; -use bevy_tasks::TaskPool; use bevy_utils::{BoxedFuture, HashMap}; use crossbeam_channel::{Receiver, Sender}; use downcast_rs::{impl_downcast, Downcast}; @@ -84,7 +83,6 @@ pub struct LoadContext<'a> { pub(crate) labeled_assets: HashMap, BoxedLoadedAsset>, pub(crate) path: &'a Path, pub(crate) version: usize, - pub(crate) task_pool: &'a TaskPool, } impl<'a> LoadContext<'a> { @@ -93,7 +91,6 @@ impl<'a> LoadContext<'a> { ref_change_channel: &'a RefChangeChannel, asset_io: &'a dyn AssetIo, version: usize, - task_pool: &'a TaskPool, ) -> Self { Self { ref_change_channel, @@ -101,7 +98,6 @@ impl<'a> LoadContext<'a> { labeled_assets: Default::default(), version, path, - task_pool, } } @@ -144,10 +140,6 @@ impl<'a> LoadContext<'a> { asset_metas } - pub fn task_pool(&self) -> &TaskPool { - self.task_pool - } - pub fn asset_io(&self) -> &dyn AssetIo { self.asset_io } diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index df99eaee19da1..cb39b3f5ca127 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -1,4 +1,4 @@ -use bevy_reflect::{Reflect, ReflectDeserialize}; +use bevy_reflect::{Reflect, ReflectDeserialize, ReflectSerialize}; use bevy_utils::AHasher; use serde::{Deserialize, Serialize}; use std::{ diff --git a/crates/bevy_audio/src/audio_output.rs b/crates/bevy_audio/src/audio_output.rs index f7010f2c7d8f3..e378a2c43f4c3 100644 --- a/crates/bevy_audio/src/audio_output.rs +++ b/crates/bevy_audio/src/audio_output.rs @@ -43,17 +43,15 @@ where Source: Asset + Decodable, { fn play_source(&self, audio_source: &Source, repeat: bool) -> Option { - if let Some(stream_handle) = &self.stream_handle { + self.stream_handle.as_ref().map(|stream_handle| { let sink = Sink::try_new(stream_handle).unwrap(); if repeat { sink.append(audio_source.decoder().repeat_infinite()); } else { sink.append(audio_source.decoder()); } - Some(sink) - } else { - None - } + sink + }) } fn try_play_queued( diff --git 
a/crates/bevy_core/src/lib.rs b/crates/bevy_core/src/lib.rs index 44b2be465b9cf..7c7d193564f87 100644 --- a/crates/bevy_core/src/lib.rs +++ b/crates/bevy_core/src/lib.rs @@ -3,25 +3,19 @@ mod name; mod task_pool_options; -mod time; pub use bytemuck::{bytes_of, cast_slice, Pod, Zeroable}; pub use name::*; pub use task_pool_options::*; -pub use time::*; pub mod prelude { //! The Bevy Core Prelude. #[doc(hidden)] - pub use crate::{DefaultTaskPoolOptions, Name, Time, Timer}; + pub use crate::{DefaultTaskPoolOptions, Name}; } use bevy_app::prelude::*; -use bevy_ecs::{ - entity::Entity, - schedule::{ExclusiveSystemDescriptorCoercion, SystemLabel}, - system::IntoExclusiveSystem, -}; +use bevy_ecs::entity::Entity; use bevy_utils::HashSet; use std::ops::Range; @@ -29,14 +23,6 @@ use std::ops::Range; #[derive(Default)] pub struct CorePlugin; -/// A `SystemLabel` enum for ordering systems relative to core Bevy systems. -#[derive(Debug, PartialEq, Eq, Clone, Hash, SystemLabel)] -pub enum CoreSystem { - /// Updates the elapsed time. Any system that interacts with [Time] component should run after - /// this. - Time, -} - impl Plugin for CorePlugin { fn build(&self, app: &mut App) { // Setup the default bevy task pools @@ -44,22 +30,9 @@ impl Plugin for CorePlugin { .get_resource::() .cloned() .unwrap_or_default() - .create_default_pools(&mut app.world); + .create_default_pools(); - app.init_resource::, -} - -/// Type-level linked list terminator for array dimensions. -pub struct DimensionNil; - -/// Trait for type-level array dimensions. Probably shouldn't be implemented outside this crate. -pub unsafe trait DimensionList { - /// Write dimensions in square brackets to a string, list tail to list head. - fn push_to_string(s: &mut String); -} - -unsafe impl DimensionList for DimensionNil { - fn push_to_string(_: &mut String) {} -} - -unsafe impl DimensionList for Dimension { - fn push_to_string(s: &mut String) { - use std::fmt::Write; - A::push_to_string(s); - write!(s, "[{}]", N).unwrap(); - } -} - -/// Trait for types that have a GLSL equivalent. Useful for generating GLSL code -/// from Rust structs. -pub unsafe trait Glsl { - /// The name of this type in GLSL, like `vec2` or `mat4`. - const NAME: &'static str; -} - -/// Trait for types that can be represented as a struct in GLSL. -/// -/// This trait should not generally be implemented by hand, but can be derived. -pub unsafe trait GlslStruct: Glsl { - /// The fields contained in this struct. - fn enumerate_fields(s: &mut String); - - /// Generates GLSL code that represents this struct and its fields. - fn glsl_definition() -> String { - let mut output = String::new(); - output.push_str("struct "); - output.push_str(Self::NAME); - output.push_str(" {\n"); - - Self::enumerate_fields(&mut output); - - output.push_str("};"); - output - } -} - -/// Trait for types that are expressible as a GLSL type with (possibly zero) array dimensions. -pub unsafe trait GlslArray { - /// Base type name. - const NAME: &'static str; - /// Type-level linked list of array dimensions, ordered outer to inner. 
- type ArraySize: DimensionList; -} - -unsafe impl GlslArray for T { - const NAME: &'static str = ::NAME; - type ArraySize = DimensionNil; -} - -unsafe impl Glsl for f32 { - const NAME: &'static str = "float"; -} - -unsafe impl Glsl for f64 { - const NAME: &'static str = "double"; -} - -unsafe impl Glsl for i32 { - const NAME: &'static str = "int"; -} - -unsafe impl Glsl for u32 { - const NAME: &'static str = "uint"; -} - -unsafe impl GlslArray for [T; N] { - const NAME: &'static str = T::NAME; - - type ArraySize = Dimension; -} diff --git a/crates/bevy_crevice/src/imp.rs b/crates/bevy_crevice/src/imp.rs deleted file mode 100644 index af49bd8915bbf..0000000000000 --- a/crates/bevy_crevice/src/imp.rs +++ /dev/null @@ -1,10 +0,0 @@ -mod imp_mint; - -#[cfg(feature = "cgmath")] -mod imp_cgmath; - -#[cfg(feature = "glam")] -mod imp_glam; - -#[cfg(feature = "nalgebra")] -mod imp_nalgebra; diff --git a/crates/bevy_crevice/src/imp/imp_cgmath.rs b/crates/bevy_crevice/src/imp/imp_cgmath.rs deleted file mode 100644 index 79ee7e071cec2..0000000000000 --- a/crates/bevy_crevice/src/imp/imp_cgmath.rs +++ /dev/null @@ -1,30 +0,0 @@ -easy_impl! { - Vec2 cgmath::Vector2 { x, y }, - Vec3 cgmath::Vector3 { x, y, z }, - Vec4 cgmath::Vector4 { x, y, z, w }, - - IVec2 cgmath::Vector2 { x, y }, - IVec3 cgmath::Vector3 { x, y, z }, - IVec4 cgmath::Vector4 { x, y, z, w }, - - UVec2 cgmath::Vector2 { x, y }, - UVec3 cgmath::Vector3 { x, y, z }, - UVec4 cgmath::Vector4 { x, y, z, w }, - - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - // BVec2 cgmath::Vector2 { x, y }, - // BVec3 cgmath::Vector3 { x, y, z }, - // BVec4 cgmath::Vector4 { x, y, z, w }, - - DVec2 cgmath::Vector2 { x, y }, - DVec3 cgmath::Vector3 { x, y, z }, - DVec4 cgmath::Vector4 { x, y, z, w }, - - Mat2 cgmath::Matrix2 { x, y }, - Mat3 cgmath::Matrix3 { x, y, z }, - Mat4 cgmath::Matrix4 { x, y, z, w }, - - DMat2 cgmath::Matrix2 { x, y }, - DMat3 cgmath::Matrix3 { x, y, z }, - DMat4 cgmath::Matrix4 { x, y, z, w }, -} diff --git a/crates/bevy_crevice/src/imp/imp_glam.rs b/crates/bevy_crevice/src/imp/imp_glam.rs deleted file mode 100644 index 58ef711c27855..0000000000000 --- a/crates/bevy_crevice/src/imp/imp_glam.rs +++ /dev/null @@ -1,24 +0,0 @@ -minty_impl! { - mint::Vector2 => glam::Vec2, - mint::Vector3 => glam::Vec3, - mint::Vector4 => glam::Vec4, - mint::Vector2 => glam::IVec2, - mint::Vector3 => glam::IVec3, - mint::Vector4 => glam::IVec4, - mint::Vector2 => glam::UVec2, - mint::Vector3 => glam::UVec3, - mint::Vector4 => glam::UVec4, - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - // mint::Vector2 => glam::BVec2, - // mint::Vector3 => glam::BVec3, - // mint::Vector4 => glam::BVec4, - mint::Vector2 => glam::DVec2, - mint::Vector3 => glam::DVec3, - mint::Vector4 => glam::DVec4, - mint::ColumnMatrix2 => glam::Mat2, - mint::ColumnMatrix3 => glam::Mat3, - mint::ColumnMatrix4 => glam::Mat4, - mint::ColumnMatrix2 => glam::DMat2, - mint::ColumnMatrix3 => glam::DMat3, - mint::ColumnMatrix4 => glam::DMat4, -} diff --git a/crates/bevy_crevice/src/imp/imp_mint.rs b/crates/bevy_crevice/src/imp/imp_mint.rs deleted file mode 100644 index 056a181c2ca70..0000000000000 --- a/crates/bevy_crevice/src/imp/imp_mint.rs +++ /dev/null @@ -1,30 +0,0 @@ -easy_impl! 
{ - Vec2 mint::Vector2 { x, y }, - Vec3 mint::Vector3 { x, y, z }, - Vec4 mint::Vector4 { x, y, z, w }, - - IVec2 mint::Vector2 { x, y }, - IVec3 mint::Vector3 { x, y, z }, - IVec4 mint::Vector4 { x, y, z, w }, - - UVec2 mint::Vector2 { x, y }, - UVec3 mint::Vector3 { x, y, z }, - UVec4 mint::Vector4 { x, y, z, w }, - - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - // BVec2 mint::Vector2 { x, y }, - // BVec3 mint::Vector3 { x, y, z }, - // BVec4 mint::Vector4 { x, y, z, w }, - - DVec2 mint::Vector2 { x, y }, - DVec3 mint::Vector3 { x, y, z }, - DVec4 mint::Vector4 { x, y, z, w }, - - Mat2 mint::ColumnMatrix2 { x, y }, - Mat3 mint::ColumnMatrix3 { x, y, z }, - Mat4 mint::ColumnMatrix4 { x, y, z, w }, - - DMat2 mint::ColumnMatrix2 { x, y }, - DMat3 mint::ColumnMatrix3 { x, y, z }, - DMat4 mint::ColumnMatrix4 { x, y, z, w }, -} diff --git a/crates/bevy_crevice/src/imp/imp_nalgebra.rs b/crates/bevy_crevice/src/imp/imp_nalgebra.rs deleted file mode 100644 index 3d1b89c0d315b..0000000000000 --- a/crates/bevy_crevice/src/imp/imp_nalgebra.rs +++ /dev/null @@ -1,24 +0,0 @@ -minty_impl! { - mint::Vector2 => nalgebra::Vector2, - mint::Vector3 => nalgebra::Vector3, - mint::Vector4 => nalgebra::Vector4, - mint::Vector2 => nalgebra::Vector2, - mint::Vector3 => nalgebra::Vector3, - mint::Vector4 => nalgebra::Vector4, - mint::Vector2 => nalgebra::Vector2, - mint::Vector3 => nalgebra::Vector3, - mint::Vector4 => nalgebra::Vector4, - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - // mint::Vector2 => nalgebra::Vector2, - // mint::Vector3 => nalgebra::Vector3, - // mint::Vector4 => nalgebra::Vector4, - mint::Vector2 => nalgebra::Vector2, - mint::Vector3 => nalgebra::Vector3, - mint::Vector4 => nalgebra::Vector4, - mint::ColumnMatrix2 => nalgebra::Matrix2, - mint::ColumnMatrix3 => nalgebra::Matrix3, - mint::ColumnMatrix4 => nalgebra::Matrix4, - mint::ColumnMatrix2 => nalgebra::Matrix2, - mint::ColumnMatrix3 => nalgebra::Matrix3, - mint::ColumnMatrix4 => nalgebra::Matrix4, -} diff --git a/crates/bevy_crevice/src/internal.rs b/crates/bevy_crevice/src/internal.rs deleted file mode 100644 index cd22972fb30aa..0000000000000 --- a/crates/bevy_crevice/src/internal.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! This module is internal to crevice but used by its derive macro. No -//! guarantees are made about its contents. - -pub use bytemuck; - -/// Gives the number of bytes needed to make `offset` be aligned to `alignment`. -pub const fn align_offset(offset: usize, alignment: usize) -> usize { - if alignment == 0 || offset % alignment == 0 { - 0 - } else { - alignment - offset % alignment - } -} - -/// Max of two `usize`. Implemented because the `max` method from `Ord` cannot -/// be used in const fns. -pub const fn max(a: usize, b: usize) -> usize { - if a > b { - a - } else { - b - } -} - -/// Max of an array of `usize`. This function's implementation is funky because -/// we have no for loops! -pub const fn max_arr(input: [usize; N]) -> usize { - let mut max = 0; - let mut i = 0; - - while i < N { - if input[i] > max { - max = input[i]; - } - - i += 1; - } - - max -} diff --git a/crates/bevy_crevice/src/lib.rs b/crates/bevy_crevice/src/lib.rs deleted file mode 100644 index 48451966be4a5..0000000000000 --- a/crates/bevy_crevice/src/lib.rs +++ /dev/null @@ -1,174 +0,0 @@ -#![allow( - clippy::new_without_default, - clippy::needless_update, - clippy::len_without_is_empty, - clippy::needless_range_loop, - clippy::all, - clippy::doc_markdown -)] -/*! 
-[![GitHub CI Status](https://github.com/LPGhatguy/crevice/workflows/CI/badge.svg)](https://github.com/LPGhatguy/crevice/actions) -[![crevice on crates.io](https://img.shields.io/crates/v/crevice.svg)](https://crates.io/crates/crevice) -[![crevice docs](https://img.shields.io/badge/docs-docs.rs-orange.svg)](https://docs.rs/crevice) - -Crevice creates GLSL-compatible versions of types through the power of derive -macros. Generated structures provide an [`as_bytes`][std140::Std140::as_bytes] -method to allow safely packing data into buffers for uploading. - -Generated structs also implement [`bytemuck::Zeroable`] and -[`bytemuck::Pod`] for use with other libraries. - -Crevice is similar to [`glsl-layout`][glsl-layout], but supports types from many -math crates, can generate GLSL source from structs, and explicitly initializes -padding to remove one source of undefined behavior. - -Crevice has support for many Rust math libraries via feature flags, and most -other math libraries by use of the mint crate. Crevice currently supports: - -* mint 0.5, enabled by default -* cgmath 0.18, using the `cgmath` feature -* nalgebra 0.29, using the `nalgebra` feature -* glam 0.19, using the `glam` feature - -PRs are welcome to add or update math libraries to Crevice. - -If your math library is not supported, it's possible to define structs using the -types from mint and convert your math library's types into mint types. This is -supported by most Rust math libraries. - -Your math library may require you to turn on a feature flag to get mint support. -For example, cgmath requires the "mint" feature to be enabled to allow -conversions to and from mint types. - -## Examples - -### Single Value - -Uploading many types can be done by deriving [`AsStd140`][std140::AsStd140] and -using [`as_std140`][std140::AsStd140::as_std140] and -[`as_bytes`][std140::Std140::as_bytes] to turn the result into bytes. - -```glsl -uniform MAIN { - mat3 orientation; - vec3 position; - float scale; -} main; -``` - -```rust -use bevy_crevice::std140::{AsStd140, Std140}; - -#[derive(AsStd140)] -struct MainUniform { - orientation: mint::ColumnMatrix3, - position: mint::Vector3, - scale: f32, -} - -let value = MainUniform { - orientation: [ - [1.0, 0.0, 0.0], - [0.0, 1.0, 0.0], - [0.0, 0.0, 1.0], - ].into(), - position: [1.0, 2.0, 3.0].into(), - scale: 4.0, -}; - -let value_std140 = value.as_std140(); - -# fn upload_data_to_gpu(_value: &[u8]) {} -upload_data_to_gpu(value_std140.as_bytes()); -``` - -### Sequential Types - -More complicated data can be uploaded using the std140 -[`Writer`][std140::Writer] type. 
- -```glsl -struct PointLight { - vec3 position; - vec3 color; - float brightness; -}; - -buffer POINT_LIGHTS { - uint len; - PointLight[] lights; -} point_lights; -``` - -```rust -use bevy_crevice::std140::{self, AsStd140}; - -#[derive(AsStd140)] -struct PointLight { - position: mint::Vector3, - color: mint::Vector3, - brightness: f32, -} - -let lights = vec![ - PointLight { - position: [0.0, 1.0, 0.0].into(), - color: [1.0, 0.0, 0.0].into(), - brightness: 0.6, - }, - PointLight { - position: [0.0, 4.0, 3.0].into(), - color: [1.0, 1.0, 1.0].into(), - brightness: 1.0, - }, -]; - -# fn map_gpu_buffer_for_write() -> &'static mut [u8] { -# Box::leak(vec![0; 1024].into_boxed_slice()) -# } -let target_buffer = map_gpu_buffer_for_write(); -let mut writer = std140::Writer::new(target_buffer); - -let light_count = lights.len() as u32; -writer.write(&light_count)?; - -// Crevice will automatically insert the required padding to align the -// PointLight structure correctly. In this case, there will be 12 bytes of -// padding between the length field and the light list. - -writer.write(lights.as_slice())?; - -# fn unmap_gpu_buffer() {} -unmap_gpu_buffer(); - -# Ok::<(), std::io::Error>(()) -``` - -## Features - -* `std` (default): Enables [`std::io::Write`]-based structs. -* `cgmath`: Enables support for types from cgmath. -* `nalgebra`: Enables support for types from nalgebra. -* `glam`: Enables support for types from glam. - -## Minimum Supported Rust Version (MSRV) - -Crevice supports Rust 1.52.1 and newer due to use of new `const fn` features. - -[glsl-layout]: https://github.com/rustgd/glsl-layout -*/ - -#![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -#[macro_use] -mod util; - -pub mod glsl; -pub mod std140; -pub mod std430; - -#[doc(hidden)] -pub mod internal; - -mod imp; diff --git a/crates/bevy_crevice/src/std140.rs b/crates/bevy_crevice/src/std140.rs deleted file mode 100644 index dd7cde1cabf40..0000000000000 --- a/crates/bevy_crevice/src/std140.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Defines traits and types for working with data adhering to GLSL's `std140` -//! layout specification. - -mod dynamic_uniform; -mod primitives; -mod sizer; -mod traits; -#[cfg(feature = "std")] -mod writer; - -pub use self::dynamic_uniform::*; -pub use self::primitives::*; -pub use self::sizer::*; -pub use self::traits::*; -#[cfg(feature = "std")] -pub use self::writer::*; - -pub use bevy_crevice_derive::AsStd140; diff --git a/crates/bevy_crevice/src/std140/dynamic_uniform.rs b/crates/bevy_crevice/src/std140/dynamic_uniform.rs deleted file mode 100644 index 262f8ea449842..0000000000000 --- a/crates/bevy_crevice/src/std140/dynamic_uniform.rs +++ /dev/null @@ -1,68 +0,0 @@ -use bytemuck::{Pod, Zeroable}; - -#[allow(unused_imports)] -use crate::internal::{align_offset, max}; -use crate::std140::{AsStd140, Std140}; - -/// Wrapper type that aligns the inner type to at least 256 bytes. -/// -/// This type is useful for ensuring correct alignment when creating dynamic -/// uniform buffers in APIs like WebGPU. -pub struct DynamicUniform(pub T); - -impl AsStd140 for DynamicUniform { - type Output = DynamicUniformStd140<::Output>; - - fn as_std140(&self) -> Self::Output { - DynamicUniformStd140(self.0.as_std140()) - } - - fn from_std140(value: Self::Output) -> Self { - DynamicUniform(::from_std140(value.0)) - } -} - -/// std140 variant of [`DynamicUniform`]. 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct DynamicUniformStd140(T); - -unsafe impl Std140 for DynamicUniformStd140 { - const ALIGNMENT: usize = max(256, T::ALIGNMENT); - #[cfg(const_evaluatable_checked)] - type Padded = crate::std140::Std140Padded< - Self, - { align_offset(core::mem::size_of::(), max(256, T::ALIGNMENT)) }, - >; - #[cfg(not(const_evaluatable_checked))] - type Padded = crate::std140::InvalidPadded; -} - -unsafe impl Zeroable for DynamicUniformStd140 {} -unsafe impl Pod for DynamicUniformStd140 {} - -#[cfg(test)] -mod test { - use super::*; - - use crate::std140::{self, WriteStd140}; - - #[test] - fn size_is_unchanged() { - let dynamic_f32 = DynamicUniform(0.0f32); - - assert_eq!(dynamic_f32.std140_size(), 0.0f32.std140_size()); - } - - #[test] - fn alignment_applies() { - let mut output = Vec::new(); - let mut writer = std140::Writer::new(&mut output); - - writer.write(&DynamicUniform(0.0f32)).unwrap(); - assert_eq!(writer.len(), 4); - - writer.write(&DynamicUniform(1.0f32)).unwrap(); - assert_eq!(writer.len(), 260); - } -} diff --git a/crates/bevy_crevice/src/std140/primitives.rs b/crates/bevy_crevice/src/std140/primitives.rs deleted file mode 100644 index 34e161e3b7818..0000000000000 --- a/crates/bevy_crevice/src/std140/primitives.rs +++ /dev/null @@ -1,175 +0,0 @@ -use bytemuck::{Pod, Zeroable}; - -use crate::glsl::Glsl; -use crate::std140::{Std140, Std140Padded}; - -use crate::internal::{align_offset, max}; -use core::mem::size_of; - -unsafe impl Std140 for f32 { - const ALIGNMENT: usize = 4; - type Padded = Std140Padded; -} - -unsafe impl Std140 for f64 { - const ALIGNMENT: usize = 8; - type Padded = Std140Padded; -} - -unsafe impl Std140 for i32 { - const ALIGNMENT: usize = 4; - type Padded = Std140Padded; -} - -unsafe impl Std140 for u32 { - const ALIGNMENT: usize = 4; - type Padded = Std140Padded; -} - -macro_rules! vectors { - ( - $( - #[$doc:meta] align($align:literal) $glsl_name:ident $name:ident <$prim:ident> ($($field:ident),+) - )+ - ) => { - $( - #[$doc] - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy, PartialEq)] - #[repr(C)] - pub struct $name { - $(pub $field: $prim,)+ - } - - unsafe impl Zeroable for $name {} - unsafe impl Pod for $name {} - - unsafe impl Std140 for $name { - const ALIGNMENT: usize = $align; - type Padded = Std140Padded(), max(16, $align))}>; - } - - unsafe impl Glsl for $name { - const NAME: &'static str = stringify!($glsl_name); - } - )+ - }; -} - -vectors! 
{ - #[doc = "Corresponds to a GLSL `vec2` in std140 layout."] align(8) vec2 Vec2(x, y) - #[doc = "Corresponds to a GLSL `vec3` in std140 layout."] align(16) vec3 Vec3(x, y, z) - #[doc = "Corresponds to a GLSL `vec4` in std140 layout."] align(16) vec4 Vec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `ivec2` in std140 layout."] align(8) ivec2 IVec2(x, y) - #[doc = "Corresponds to a GLSL `ivec3` in std140 layout."] align(16) ivec3 IVec3(x, y, z) - #[doc = "Corresponds to a GLSL `ivec4` in std140 layout."] align(16) ivec4 IVec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `uvec2` in std140 layout."] align(8) uvec2 UVec2(x, y) - #[doc = "Corresponds to a GLSL `uvec3` in std140 layout."] align(16) uvec3 UVec3(x, y, z) - #[doc = "Corresponds to a GLSL `uvec4` in std140 layout."] align(16) uvec4 UVec4(x, y, z, w) - - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - - // #[doc = "Corresponds to a GLSL `bvec2` in std140 layout."] align(8) bvec2 BVec2(x, y) - // #[doc = "Corresponds to a GLSL `bvec3` in std140 layout."] align(16) bvec3 BVec3(x, y, z) - // #[doc = "Corresponds to a GLSL `bvec4` in std140 layout."] align(16) bvec4 BVec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `dvec2` in std140 layout."] align(16) dvec2 DVec2(x, y) - #[doc = "Corresponds to a GLSL `dvec3` in std140 layout."] align(32) dvec3 DVec3(x, y, z) - #[doc = "Corresponds to a GLSL `dvec4` in std140 layout."] align(32) dvec4 DVec4(x, y, z, w) -} - -macro_rules! matrices { - ( - $( - #[$doc:meta] - align($align:literal) - $glsl_name:ident $name:ident { - $($field:ident: $field_ty:ty,)+ - } - )+ - ) => { - $( - #[$doc] - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - #[repr(C)] - pub struct $name { - $(pub $field: $field_ty,)+ - } - - unsafe impl Zeroable for $name {} - unsafe impl Pod for $name {} - - unsafe impl Std140 for $name { - const ALIGNMENT: usize = $align; - /// Matrices are technically arrays of primitives, and as such require pad at end. - const PAD_AT_END: bool = true; - type Padded = Std140Padded(), max(16, $align))}>; - } - - unsafe impl Glsl for $name { - const NAME: &'static str = stringify!($glsl_name); - } - )+ - }; -} - -matrices! { - #[doc = "Corresponds to a GLSL `mat2` in std140 layout."] - align(16) - mat2 Mat2 { - x: Vec2, - _pad_x: [f32; 2], - y: Vec2, - _pad_y: [f32; 2], - } - - #[doc = "Corresponds to a GLSL `mat3` in std140 layout."] - align(16) - mat3 Mat3 { - x: Vec3, - _pad_x: f32, - y: Vec3, - _pad_y: f32, - z: Vec3, - _pad_z: f32, - } - - #[doc = "Corresponds to a GLSL `mat4` in std140 layout."] - align(16) - mat4 Mat4 { - x: Vec4, - y: Vec4, - z: Vec4, - w: Vec4, - } - - #[doc = "Corresponds to a GLSL `dmat2` in std140 layout."] - align(16) - dmat2 DMat2 { - x: DVec2, - y: DVec2, - } - - #[doc = "Corresponds to a GLSL `dmat3` in std140 layout."] - align(32) - dmat3 DMat3 { - x: DVec3, - _pad_x: f64, - y: DVec3, - _pad_y: f64, - z: DVec3, - _pad_z: f64, - } - - #[doc = "Corresponds to a GLSL `dmat3` in std140 layout."] - align(32) - dmat4 DMat4 { - x: DVec4, - y: DVec4, - z: DVec4, - w: DVec4, - } -} diff --git a/crates/bevy_crevice/src/std140/sizer.rs b/crates/bevy_crevice/src/std140/sizer.rs deleted file mode 100644 index 87c27cb63b3d9..0000000000000 --- a/crates/bevy_crevice/src/std140/sizer.rs +++ /dev/null @@ -1,81 +0,0 @@ -use core::mem::size_of; - -use crate::internal::align_offset; -use crate::std140::{AsStd140, Std140}; - -/** -Type that computes the buffer size needed by a series of `std140` types laid -out. 
- -This type works well well when paired with `Writer`, precomputing a buffer's -size to alleviate the need to dynamically re-allocate buffers. - -## Example - -```glsl -struct Frob { - vec3 size; - float frobiness; -} - -buffer FROBS { - uint len; - Frob[] frobs; -} frobs; -``` - -``` -use bevy_crevice::std140::{self, AsStd140}; - -#[derive(AsStd140)] -struct Frob { - size: mint::Vector3, - frobiness: f32, -} - -// Many APIs require that buffers contain at least enough space for all -// fixed-size bindiongs to a buffer as well as one element of any arrays, if -// there are any. -let mut sizer = std140::Sizer::new(); -sizer.add::(); -sizer.add::(); - -# fn create_buffer_with_size(size: usize) {} -let buffer = create_buffer_with_size(sizer.len()); -# assert_eq!(sizer.len(), 32); -``` -*/ -pub struct Sizer { - offset: usize, -} - -impl Sizer { - /// Create a new `Sizer`. - pub fn new() -> Self { - Self { offset: 0 } - } - - /// Add a type's necessary padding and size to the `Sizer`. Returns the - /// offset into the buffer where that type would be written. - pub fn add(&mut self) -> usize - where - T: AsStd140, - { - let size = size_of::<::Output>(); - let alignment = ::Output::ALIGNMENT; - let padding = align_offset(self.offset, alignment); - - self.offset += padding; - let write_here = self.offset; - - self.offset += size; - - write_here - } - - /// Returns the number of bytes required to contain all the types added to - /// the `Sizer`. - pub fn len(&self) -> usize { - self.offset - } -} diff --git a/crates/bevy_crevice/src/std140/traits.rs b/crates/bevy_crevice/src/std140/traits.rs deleted file mode 100644 index 392251c3f80ec..0000000000000 --- a/crates/bevy_crevice/src/std140/traits.rs +++ /dev/null @@ -1,284 +0,0 @@ -use core::mem::{size_of, MaybeUninit}; -#[cfg(feature = "std")] -use std::io::{self, Write}; - -use bytemuck::{bytes_of, Pod, Zeroable}; - -#[cfg(feature = "std")] -use crate::std140::Writer; - -/// Trait implemented for all `std140` primitives. Generally should not be -/// implemented outside this crate. -pub unsafe trait Std140: Copy + Zeroable + Pod { - /// The required alignment of the type. Must be a power of two. - /// - /// This is distinct from the value returned by `std::mem::align_of` because - /// `AsStd140` structs do not use Rust's alignment. This enables them to - /// control and zero their padding bytes, making converting them to and from - /// slices safe. - const ALIGNMENT: usize; - - /// Whether this type requires a padding at the end (ie, is a struct or an array - /// of primitives). - /// See - /// (rule 4 and 9) - const PAD_AT_END: bool = false; - /// Padded type (Std140Padded specialization) - /// The usual implementation is - /// type Padded = Std140Padded(), max(16, ALIGNMENT))}>; - type Padded: Std140Convertible; - - /// Casts the type to a byte array. Implementors should not override this - /// method. - /// - /// # Safety - /// This is always safe due to the requirements of [`bytemuck::Pod`] being a - /// prerequisite for this trait. - fn as_bytes(&self) -> &[u8] { - bytes_of(self) - } -} - -/// Trait specifically for Std140::Padded, implements conversions between padded type and base type. 
-pub trait Std140Convertible: Copy { - /// Convert from self to Std140 - fn into_std140(self) -> T; - /// Convert from Std140 to self - fn from_std140(_: T) -> Self; -} - -impl Std140Convertible for T { - fn into_std140(self) -> T { - self - } - fn from_std140(also_self: T) -> Self { - also_self - } -} - -/// Unfortunately, we cannot easily derive padded representation for generic Std140 types. -/// For now, we'll just use this empty enum with no values. -#[derive(Copy, Clone)] -pub enum InvalidPadded {} -impl Std140Convertible for InvalidPadded { - fn into_std140(self) -> T { - unimplemented!() - } - fn from_std140(_: T) -> Self { - unimplemented!() - } -} -/** -Trait implemented for all types that can be turned into `std140` values. -* -This trait can often be `#[derive]`'d instead of manually implementing it. Any -struct which contains only fields that also implement `AsStd140` can derive -`AsStd140`. - -Types from the mint crate implement `AsStd140`, making them convenient for use -in uniform types. Most Rust math crates, like cgmath, nalgebra, and -ultraviolet support mint. - -## Example - -```glsl -uniform CAMERA { - mat4 view; - mat4 projection; -} camera; -``` - -```no_run -use bevy_crevice::std140::{AsStd140, Std140}; - -#[derive(AsStd140)] -struct CameraUniform { - view: mint::ColumnMatrix4, - projection: mint::ColumnMatrix4, -} - -let view: mint::ColumnMatrix4 = todo!("your math code here"); -let projection: mint::ColumnMatrix4 = todo!("your math code here"); - -let camera = CameraUniform { - view, - projection, -}; - -# fn write_to_gpu_buffer(bytes: &[u8]) {} -let camera_std140 = camera.as_std140(); -write_to_gpu_buffer(camera_std140.as_bytes()); -``` -*/ -pub trait AsStd140 { - /// The `std140` version of this value. - type Output: Std140; - - /// Convert this value into the `std140` version of itself. - fn as_std140(&self) -> Self::Output; - - /// Returns the size of the `std140` version of this type. Useful for - /// pre-sizing buffers. - fn std140_size_static() -> usize { - size_of::() - } - - /// Converts from `std140` version of self to self. 
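
As a companion to the `CameraUniform` doc example above, a hedged sketch of the round trip from a Rust-side struct to GPU-ready bytes; the helper name is illustrative and the `mint` field types are assumed from the crate's examples:

```rust
use bevy_crevice::std140::{AsStd140, Std140};

#[derive(AsStd140)]
struct CameraUniform {
    view: mint::ColumnMatrix4<f32>,
    projection: mint::ColumnMatrix4<f32>,
}

// Illustrative helper: convert to the padded std140 layout and copy out the raw bytes.
fn to_gpu_bytes(camera: &CameraUniform) -> Vec<u8> {
    camera.as_std140().as_bytes().to_vec()
}
```
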
- fn from_std140(val: Self::Output) -> Self; -} - -impl AsStd140 for T -where - T: Std140, -{ - type Output = Self; - - fn as_std140(&self) -> Self { - *self - } - - fn from_std140(x: Self) -> Self { - x - } -} - -#[doc(hidden)] -#[derive(Copy, Clone, Debug)] -pub struct Std140Padded { - inner: T, - _padding: [u8; PAD], -} - -unsafe impl Zeroable for Std140Padded {} -unsafe impl Pod for Std140Padded {} - -impl Std140Convertible for Std140Padded { - fn into_std140(self) -> T { - self.inner - } - - fn from_std140(inner: T) -> Self { - Self { - inner, - _padding: [0u8; PAD], - } - } -} - -#[doc(hidden)] -#[derive(Copy, Clone, Debug)] -#[repr(transparent)] -pub struct Std140Array([T::Padded; N]); - -unsafe impl Zeroable for Std140Array where T::Padded: Zeroable {} -unsafe impl Pod for Std140Array where T::Padded: Pod {} -unsafe impl Std140 for Std140Array -where - T::Padded: Pod, -{ - const ALIGNMENT: usize = crate::internal::max(T::ALIGNMENT, 16); - type Padded = Self; -} - -impl Std140Array { - fn uninit_array() -> [MaybeUninit; N] { - unsafe { MaybeUninit::uninit().assume_init() } - } - - fn from_uninit_array(a: [MaybeUninit; N]) -> Self { - unsafe { core::mem::transmute_copy(&a) } - } -} - -impl AsStd140 for [T; N] -where - ::Padded: Pod, -{ - type Output = Std140Array; - fn as_std140(&self) -> Self::Output { - let mut res = Self::Output::uninit_array(); - - for i in 0..N { - res[i] = MaybeUninit::new(Std140Convertible::from_std140(self[i].as_std140())); - } - - Self::Output::from_uninit_array(res) - } - - fn from_std140(val: Self::Output) -> Self { - let mut res: [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; - for i in 0..N { - res[i] = MaybeUninit::new(T::from_std140(Std140Convertible::into_std140(val.0[i]))); - } - unsafe { core::mem::transmute_copy(&res) } - } -} - -/// Trait implemented for all types that can be written into a buffer as -/// `std140` bytes. This type is more general than [`AsStd140`]: all `AsStd140` -/// types implement `WriteStd140`, but not the other way around. -/// -/// While `AsStd140` requires implementers to return a type that implements the -/// `Std140` trait, `WriteStd140` directly writes bytes using a [`Writer`]. This -/// makes `WriteStd140` usable for writing slices or other DSTs that could not -/// implement `AsStd140` without allocating new memory on the heap. -#[cfg(feature = "std")] -pub trait WriteStd140 { - /// Writes this value into the given [`Writer`] using `std140` layout rules. - /// - /// Should return the offset of the first byte of this type, as returned by - /// the first call to [`Writer::write`]. - fn write_std140(&self, writer: &mut Writer) -> io::Result; - - /// The space required to write this value using `std140` layout rules. This - /// does not include alignment padding that may be needed before or after - /// this type when written as part of a larger buffer. 
- fn std140_size(&self) -> usize { - let mut writer = Writer::new(io::sink()); - self.write_std140(&mut writer).unwrap(); - writer.len() - } -} - -#[cfg(feature = "std")] -impl WriteStd140 for T -where - T: AsStd140, -{ - fn write_std140(&self, writer: &mut Writer) -> io::Result { - writer.write_std140(&self.as_std140()) - } - - fn std140_size(&self) -> usize { - size_of::<::Output>() - } -} - -#[cfg(feature = "std")] -impl WriteStd140 for [T] -where - T: WriteStd140, -{ - fn write_std140(&self, writer: &mut Writer) -> io::Result { - // if no items are written, offset is current position of the writer - let mut offset = writer.len(); - - let mut iter = self.iter(); - - if let Some(item) = iter.next() { - offset = item.write_std140(writer)?; - } - - for item in iter { - item.write_std140(writer)?; - } - - Ok(offset) - } - - fn std140_size(&self) -> usize { - let mut writer = Writer::new(io::sink()); - self.write_std140(&mut writer).unwrap(); - writer.len() - } -} diff --git a/crates/bevy_crevice/src/std140/writer.rs b/crates/bevy_crevice/src/std140/writer.rs deleted file mode 100644 index aeed06ff78e9b..0000000000000 --- a/crates/bevy_crevice/src/std140/writer.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::io::{self, Write}; -use std::mem::size_of; - -use bytemuck::bytes_of; - -use crate::internal::align_offset; -use crate::std140::{AsStd140, Std140, WriteStd140}; - -/** -Type that enables writing correctly aligned `std140` values to a buffer. - -`Writer` is useful when many values need to be laid out in a row that cannot be -represented by a struct alone, like dynamically sized arrays or dynamically -laid-out values. - -## Example -In this example, we'll write a length-prefixed list of lights to a buffer. -`std140::Writer` helps align correctly, even across multiple structs, which can -be tricky and error-prone otherwise. - -```glsl -struct PointLight { - vec3 position; - vec3 color; - float brightness; -}; - -buffer POINT_LIGHTS { - uint len; - PointLight[] lights; -} point_lights; -``` - -``` -use bevy_crevice::std140::{self, AsStd140}; - -#[derive(AsStd140)] -struct PointLight { - position: mint::Vector3, - color: mint::Vector3, - brightness: f32, -} - -let lights = vec![ - PointLight { - position: [0.0, 1.0, 0.0].into(), - color: [1.0, 0.0, 0.0].into(), - brightness: 0.6, - }, - PointLight { - position: [0.0, 4.0, 3.0].into(), - color: [1.0, 1.0, 1.0].into(), - brightness: 1.0, - }, -]; - -# fn map_gpu_buffer_for_write() -> &'static mut [u8] { -# Box::leak(vec![0; 1024].into_boxed_slice()) -# } -let target_buffer = map_gpu_buffer_for_write(); -let mut writer = std140::Writer::new(target_buffer); - -let light_count = lights.len() as u32; -writer.write(&light_count)?; - -// Crevice will automatically insert the required padding to align the -// PointLight structure correctly. In this case, there will be 12 bytes of -// padding between the length field and the light list. - -writer.write(lights.as_slice())?; - -# fn unmap_gpu_buffer() {} -unmap_gpu_buffer(); - -# Ok::<(), std::io::Error>(()) -``` -*/ -pub struct Writer { - writer: W, - offset: usize, -} - -impl Writer { - /// Create a new `Writer`, wrapping a buffer, file, or other type that - /// implements [`std::io::Write`]. - pub fn new(writer: W) -> Self { - Self { writer, offset: 0 } - } - - /// Write a new value to the underlying buffer, writing zeroed padding where - /// necessary. - /// - /// Returns the offset into the buffer that the value was written to. 
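
A compact, self-contained version of the `Writer` example above (type parameters restored, GPU buffer mapping elided); `write_lights` is an illustrative helper, not part of the crate:

```rust
use bevy_crevice::std140::{self, AsStd140};

#[derive(AsStd140)]
struct PointLight {
    position: mint::Vector3<f32>,
    color: mint::Vector3<f32>,
    brightness: f32,
}

fn write_lights(target: &mut [u8], lights: &[PointLight]) -> std::io::Result<()> {
    let mut writer = std140::Writer::new(target);

    // Length prefix first; `Writer` then inserts the padding needed to reach the
    // std140 alignment of `PointLight` before writing the slice.
    writer.write(&(lights.len() as u32))?;
    writer.write(lights)?;
    Ok(())
}
```
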
- pub fn write(&mut self, value: &T) -> io::Result - where - T: WriteStd140 + ?Sized, - { - value.write_std140(self) - } - - /// Write an iterator of values to the underlying buffer. - /// - /// Returns the offset into the buffer that the first value was written to. - /// If no values were written, returns the `len()`. - pub fn write_iter(&mut self, iter: I) -> io::Result - where - I: IntoIterator, - T: WriteStd140, - { - let mut offset = self.offset; - - let mut iter = iter.into_iter(); - - if let Some(item) = iter.next() { - offset = item.write_std140(self)?; - } - - for item in iter { - item.write_std140(self)?; - } - - Ok(offset) - } - - /// Write an `Std140` type to the underlying buffer. - pub fn write_std140(&mut self, value: &T) -> io::Result - where - T: Std140, - { - let padding = align_offset(self.offset, T::ALIGNMENT); - - for _ in 0..padding { - self.writer.write_all(&[0])?; - } - self.offset += padding; - - let value = value.as_std140(); - self.writer.write_all(bytes_of(&value))?; - - let write_here = self.offset; - self.offset += size_of::(); - - Ok(write_here) - } - - /// Write a slice of values to the underlying buffer. - #[deprecated( - since = "0.6.0", - note = "Use `write` instead -- it now works on slices." - )] - pub fn write_slice(&mut self, slice: &[T]) -> io::Result - where - T: AsStd140, - { - self.write(slice) - } - - /// Returns the amount of data written by this `Writer`. - pub fn len(&self) -> usize { - self.offset - } -} diff --git a/crates/bevy_crevice/src/std430.rs b/crates/bevy_crevice/src/std430.rs deleted file mode 100644 index 676c999556c11..0000000000000 --- a/crates/bevy_crevice/src/std430.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Defines traits and types for working with data adhering to GLSL's `std140` -//! layout specification. - -mod primitives; -mod sizer; -mod traits; -#[cfg(feature = "std")] -mod writer; - -pub use self::primitives::*; -pub use self::sizer::*; -pub use self::traits::*; -#[cfg(feature = "std")] -pub use self::writer::*; - -pub use bevy_crevice_derive::AsStd430; diff --git a/crates/bevy_crevice/src/std430/primitives.rs b/crates/bevy_crevice/src/std430/primitives.rs deleted file mode 100644 index 3348e82c7b2c6..0000000000000 --- a/crates/bevy_crevice/src/std430/primitives.rs +++ /dev/null @@ -1,173 +0,0 @@ -use bytemuck::{Pod, Zeroable}; - -use crate::glsl::Glsl; -use crate::std430::{Std430, Std430Padded}; - -use crate::internal::align_offset; -use core::mem::size_of; - -unsafe impl Std430 for f32 { - const ALIGNMENT: usize = 4; - type Padded = Self; -} - -unsafe impl Std430 for f64 { - const ALIGNMENT: usize = 8; - type Padded = Self; -} - -unsafe impl Std430 for i32 { - const ALIGNMENT: usize = 4; - type Padded = Self; -} - -unsafe impl Std430 for u32 { - const ALIGNMENT: usize = 4; - type Padded = Self; -} - -macro_rules! vectors { - ( - $( - #[$doc:meta] align($align:literal) $glsl_name:ident $name:ident <$prim:ident> ($($field:ident),+) - )+ - ) => { - $( - #[$doc] - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - #[repr(C)] - pub struct $name { - $(pub $field: $prim,)+ - } - - unsafe impl Zeroable for $name {} - unsafe impl Pod for $name {} - - unsafe impl Std430 for $name { - const ALIGNMENT: usize = $align; - type Padded = Std430Padded(), $align)}>; - } - - unsafe impl Glsl for $name { - const NAME: &'static str = stringify!($glsl_name); - } - )+ - }; -} - -vectors! 
{ - #[doc = "Corresponds to a GLSL `vec2` in std430 layout."] align(8) vec2 Vec2(x, y) - #[doc = "Corresponds to a GLSL `vec3` in std430 layout."] align(16) vec3 Vec3(x, y, z) - #[doc = "Corresponds to a GLSL `vec4` in std430 layout."] align(16) vec4 Vec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `ivec2` in std430 layout."] align(8) ivec2 IVec2(x, y) - #[doc = "Corresponds to a GLSL `ivec3` in std430 layout."] align(16) ivec3 IVec3(x, y, z) - #[doc = "Corresponds to a GLSL `ivec4` in std430 layout."] align(16) ivec4 IVec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `uvec2` in std430 layout."] align(8) uvec2 UVec2(x, y) - #[doc = "Corresponds to a GLSL `uvec3` in std430 layout."] align(16) uvec3 UVec3(x, y, z) - #[doc = "Corresponds to a GLSL `uvec4` in std430 layout."] align(16) uvec4 UVec4(x, y, z, w) - - // bool vectors are disabled due to https://github.com/LPGhatguy/crevice/issues/36 - - // #[doc = "Corresponds to a GLSL `bvec2` in std430 layout."] align(8) bvec2 BVec2(x, y) - // #[doc = "Corresponds to a GLSL `bvec3` in std430 layout."] align(16) bvec3 BVec3(x, y, z) - // #[doc = "Corresponds to a GLSL `bvec4` in std430 layout."] align(16) bvec4 BVec4(x, y, z, w) - - #[doc = "Corresponds to a GLSL `dvec2` in std430 layout."] align(16) dvec2 DVec2(x, y) - #[doc = "Corresponds to a GLSL `dvec3` in std430 layout."] align(32) dvec3 DVec3(x, y, z) - #[doc = "Corresponds to a GLSL `dvec4` in std430 layout."] align(32) dvec4 DVec4(x, y, z, w) -} - -macro_rules! matrices { - ( - $( - #[$doc:meta] - align($align:literal) - $glsl_name:ident $name:ident { - $($field:ident: $field_ty:ty,)+ - } - )+ - ) => { - $( - #[$doc] - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - #[repr(C)] - pub struct $name { - $(pub $field: $field_ty,)+ - } - - unsafe impl Zeroable for $name {} - unsafe impl Pod for $name {} - - unsafe impl Std430 for $name { - const ALIGNMENT: usize = $align; - /// Matrices are technically arrays of primitives, and as such require pad at end. - const PAD_AT_END: bool = true; - type Padded = Std430Padded(), $align)}>; - } - - unsafe impl Glsl for $name { - const NAME: &'static str = stringify!($glsl_name); - } - )+ - }; -} - -matrices! { - #[doc = "Corresponds to a GLSL `mat2` in std430 layout."] - align(8) - mat2 Mat2 { - x: Vec2, - y: Vec2, - } - - #[doc = "Corresponds to a GLSL `mat3` in std430 layout."] - align(16) - mat3 Mat3 { - x: Vec3, - _pad_x: f32, - y: Vec3, - _pad_y: f32, - z: Vec3, - _pad_z: f32, - } - - #[doc = "Corresponds to a GLSL `mat4` in std430 layout."] - align(16) - mat4 Mat4 { - x: Vec4, - y: Vec4, - z: Vec4, - w: Vec4, - } - - #[doc = "Corresponds to a GLSL `dmat2` in std430 layout."] - align(16) - dmat2 DMat2 { - x: DVec2, - y: DVec2, - } - - #[doc = "Corresponds to a GLSL `dmat3` in std430 layout."] - align(32) - dmat3 DMat3 { - x: DVec3, - _pad_x: f64, - y: DVec3, - _pad_y: f64, - z: DVec3, - _pad_z: f64, - } - - #[doc = "Corresponds to a GLSL `dmat3` in std430 layout."] - align(32) - dmat4 DMat4 { - x: DVec4, - y: DVec4, - z: DVec4, - w: DVec4, - } -} diff --git a/crates/bevy_crevice/src/std430/sizer.rs b/crates/bevy_crevice/src/std430/sizer.rs deleted file mode 100644 index 05203d5577d9c..0000000000000 --- a/crates/bevy_crevice/src/std430/sizer.rs +++ /dev/null @@ -1,81 +0,0 @@ -use core::mem::size_of; - -use crate::internal::align_offset; -use crate::std430::{AsStd430, Std430}; - -/** -Type that computes the buffer size needed by a series of `std430` types laid -out. 
- -This type works well well when paired with `Writer`, precomputing a buffer's -size to alleviate the need to dynamically re-allocate buffers. - -## Example - -```glsl -struct Frob { - vec3 size; - float frobiness; -} - -buffer FROBS { - uint len; - Frob[] frobs; -} frobs; -``` - -``` -use bevy_crevice::std430::{self, AsStd430}; - -#[derive(AsStd430)] -struct Frob { - size: mint::Vector3, - frobiness: f32, -} - -// Many APIs require that buffers contain at least enough space for all -// fixed-size bindiongs to a buffer as well as one element of any arrays, if -// there are any. -let mut sizer = std430::Sizer::new(); -sizer.add::(); -sizer.add::(); - -# fn create_buffer_with_size(size: usize) {} -let buffer = create_buffer_with_size(sizer.len()); -# assert_eq!(sizer.len(), 32); -``` -*/ -pub struct Sizer { - offset: usize, -} - -impl Sizer { - /// Create a new `Sizer`. - pub fn new() -> Self { - Self { offset: 0 } - } - - /// Add a type's necessary padding and size to the `Sizer`. Returns the - /// offset into the buffer where that type would be written. - pub fn add(&mut self) -> usize - where - T: AsStd430, - { - let size = size_of::<::Output>(); - let alignment = ::Output::ALIGNMENT; - let padding = align_offset(self.offset, alignment); - - self.offset += padding; - let write_here = self.offset; - - self.offset += size; - - write_here - } - - /// Returns the number of bytes required to contain all the types added to - /// the `Sizer`. - pub fn len(&self) -> usize { - self.offset - } -} diff --git a/crates/bevy_crevice/src/std430/traits.rs b/crates/bevy_crevice/src/std430/traits.rs deleted file mode 100644 index 04f9f526d7f3a..0000000000000 --- a/crates/bevy_crevice/src/std430/traits.rs +++ /dev/null @@ -1,291 +0,0 @@ -use core::mem::{size_of, MaybeUninit}; -#[cfg(feature = "std")] -use std::io::{self, Write}; - -use bytemuck::{bytes_of, Pod, Zeroable}; - -#[cfg(feature = "std")] -use crate::std430::Writer; - -/// Trait implemented for all `std430` primitives. Generally should not be -/// implemented outside this crate. -pub unsafe trait Std430: Copy + Zeroable + Pod { - /// The required alignment of the type. Must be a power of two. - /// - /// This is distinct from the value returned by `std::mem::align_of` because - /// `AsStd430` structs do not use Rust's alignment. This enables them to - /// control and zero their padding bytes, making converting them to and from - /// slices safe. - const ALIGNMENT: usize; - - /// Whether this type requires a padding at the end (ie, is a struct or an array - /// of primitives). - /// See - /// (rule 4 and 9) - const PAD_AT_END: bool = false; - /// Padded type (Std430Padded specialization) - /// The usual implementation is - /// type Padded = Std430Padded(), ALIGNMENT)}>; - type Padded: Std430Convertible; - - /// Casts the type to a byte array. Implementors should not override this - /// method. - /// - /// # Safety - /// This is always safe due to the requirements of [`bytemuck::Pod`] being a - /// prerequisite for this trait. - fn as_bytes(&self) -> &[u8] { - bytes_of(self) - } -} - -unsafe impl Std430 for () { - const ALIGNMENT: usize = 0; - - const PAD_AT_END: bool = false; - - type Padded = (); -} - -/// Trait specifically for Std430::Padded, implements conversions between padded type and base type. 
-pub trait Std430Convertible: Copy { - /// Convert from self to Std430 - fn into_std430(self) -> T; - /// Convert from Std430 to self - fn from_std430(_: T) -> Self; -} - -impl Std430Convertible for T { - fn into_std430(self) -> T { - self - } - fn from_std430(also_self: T) -> Self { - also_self - } -} - -/// Unfortunately, we cannot easily derive padded representation for generic Std140 types. -/// For now, we'll just use this empty enum with no values. -#[derive(Copy, Clone)] -pub enum InvalidPadded {} -impl Std430Convertible for InvalidPadded { - fn into_std430(self) -> T { - unimplemented!() - } - fn from_std430(_: T) -> Self { - unimplemented!() - } -} -/** -Trait implemented for all types that can be turned into `std430` values. - -This trait can often be `#[derive]`'d instead of manually implementing it. Any -struct which contains only fields that also implement `AsStd430` can derive -`AsStd430`. - -Types from the mint crate implement `AsStd430`, making them convenient for use -in uniform types. Most Rust geometry crates, like cgmath, nalgebra, and -ultraviolet support mint. - -## Example - -```glsl -uniform CAMERA { - mat4 view; - mat4 projection; -} camera; -``` - -```no_run -use bevy_crevice::std430::{AsStd430, Std430}; - -#[derive(AsStd430)] -struct CameraUniform { - view: mint::ColumnMatrix4, - projection: mint::ColumnMatrix4, -} - -let view: mint::ColumnMatrix4 = todo!("your math code here"); -let projection: mint::ColumnMatrix4 = todo!("your math code here"); - -let camera = CameraUniform { - view, - projection, -}; - -# fn write_to_gpu_buffer(bytes: &[u8]) {} -let camera_std430 = camera.as_std430(); -write_to_gpu_buffer(camera_std430.as_bytes()); -``` -*/ -pub trait AsStd430 { - /// The `std430` version of this value. - type Output: Std430; - - /// Convert this value into the `std430` version of itself. - fn as_std430(&self) -> Self::Output; - - /// Returns the size of the `std430` version of this type. Useful for - /// pre-sizing buffers. - fn std430_size_static() -> usize { - size_of::() - } - - /// Converts from `std430` version of self to self. 
- fn from_std430(value: Self::Output) -> Self; -} - -impl AsStd430 for T -where - T: Std430, -{ - type Output = Self; - - fn as_std430(&self) -> Self { - *self - } - - fn from_std430(value: Self) -> Self { - value - } -} - -#[doc(hidden)] -#[derive(Copy, Clone, Debug)] -pub struct Std430Padded { - inner: T, - _padding: [u8; PAD], -} - -unsafe impl Zeroable for Std430Padded {} -unsafe impl Pod for Std430Padded {} - -impl Std430Convertible for Std430Padded { - fn into_std430(self) -> T { - self.inner - } - - fn from_std430(inner: T) -> Self { - Self { - inner, - _padding: [0u8; PAD], - } - } -} - -#[doc(hidden)] -#[derive(Copy, Clone, Debug)] -#[repr(transparent)] -pub struct Std430Array([T::Padded; N]); - -unsafe impl Zeroable for Std430Array where T::Padded: Zeroable {} -unsafe impl Pod for Std430Array where T::Padded: Pod {} -unsafe impl Std430 for Std430Array -where - T::Padded: Pod, -{ - const ALIGNMENT: usize = T::ALIGNMENT; - type Padded = Self; -} - -impl Std430Array { - fn uninit_array() -> [MaybeUninit; N] { - unsafe { MaybeUninit::uninit().assume_init() } - } - - fn from_uninit_array(a: [MaybeUninit; N]) -> Self { - unsafe { core::mem::transmute_copy(&a) } - } -} - -impl AsStd430 for [T; N] -where - ::Padded: Pod, -{ - type Output = Std430Array; - fn as_std430(&self) -> Self::Output { - let mut res = Self::Output::uninit_array(); - - for i in 0..N { - res[i] = MaybeUninit::new(Std430Convertible::from_std430(self[i].as_std430())); - } - - Self::Output::from_uninit_array(res) - } - - fn from_std430(val: Self::Output) -> Self { - let mut res: [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; - for i in 0..N { - res[i] = MaybeUninit::new(T::from_std430(val.0[i].into_std430())); - } - unsafe { core::mem::transmute_copy(&res) } - } -} - -/// Trait implemented for all types that can be written into a buffer as -/// `std430` bytes. This type is more general than [`AsStd430`]: all `AsStd430` -/// types implement `WriteStd430`, but not the other way around. -/// -/// While `AsStd430` requires implementers to return a type that implements the -/// `Std430` trait, `WriteStd430` directly writes bytes using a [`Writer`]. This -/// makes `WriteStd430` usable for writing slices or other DSTs that could not -/// implement `AsStd430` without allocating new memory on the heap. -#[cfg(feature = "std")] -pub trait WriteStd430 { - /// Writes this value into the given [`Writer`] using `std430` layout rules. - /// - /// Should return the offset of the first byte of this type, as returned by - /// the first call to [`Writer::write`]. - fn write_std430(&self, writer: &mut Writer) -> io::Result; - - /// The space required to write this value using `std430` layout rules. This - /// does not include alignment padding that may be needed before or after - /// this type when written as part of a larger buffer. 
- fn std430_size(&self) -> usize { - let mut writer = Writer::new(io::sink()); - self.write_std430(&mut writer).unwrap(); - writer.len() - } -} - -#[cfg(feature = "std")] -impl WriteStd430 for T -where - T: AsStd430, -{ - fn write_std430(&self, writer: &mut Writer) -> io::Result { - writer.write_std430(&self.as_std430()) - } - - fn std430_size(&self) -> usize { - size_of::<::Output>() - } -} - -#[cfg(feature = "std")] -impl WriteStd430 for [T] -where - T: WriteStd430, -{ - fn write_std430(&self, writer: &mut Writer) -> io::Result { - let mut offset = writer.len(); - - let mut iter = self.iter(); - - if let Some(item) = iter.next() { - offset = item.write_std430(writer)?; - } - - for item in iter { - item.write_std430(writer)?; - } - - Ok(offset) - } - - fn std430_size(&self) -> usize { - let mut writer = Writer::new(io::sink()); - self.write_std430(&mut writer).unwrap(); - writer.len() - } -} diff --git a/crates/bevy_crevice/src/std430/writer.rs b/crates/bevy_crevice/src/std430/writer.rs deleted file mode 100644 index 199ab3ab50abc..0000000000000 --- a/crates/bevy_crevice/src/std430/writer.rs +++ /dev/null @@ -1,150 +0,0 @@ -use std::io::{self, Write}; -use std::mem::size_of; - -use bytemuck::bytes_of; - -use crate::internal::align_offset; -use crate::std430::{AsStd430, Std430, WriteStd430}; - -/** -Type that enables writing correctly aligned `std430` values to a buffer. - -`Writer` is useful when many values need to be laid out in a row that cannot be -represented by a struct alone, like dynamically sized arrays or dynamically -laid-out values. - -## Example -In this example, we'll write a length-prefixed list of lights to a buffer. -`std430::Writer` helps align correctly, even across multiple structs, which can -be tricky and error-prone otherwise. - -```glsl -struct PointLight { - vec3 position; - vec3 color; - float brightness; -}; - -buffer POINT_LIGHTS { - uint len; - PointLight[] lights; -} point_lights; -``` - -``` -use bevy_crevice::std430::{self, AsStd430}; - -#[derive(AsStd430)] -struct PointLight { - position: mint::Vector3, - color: mint::Vector3, - brightness: f32, -} - -let lights = vec![ - PointLight { - position: [0.0, 1.0, 0.0].into(), - color: [1.0, 0.0, 0.0].into(), - brightness: 0.6, - }, - PointLight { - position: [0.0, 4.0, 3.0].into(), - color: [1.0, 1.0, 1.0].into(), - brightness: 1.0, - }, -]; - -# fn map_gpu_buffer_for_write() -> &'static mut [u8] { -# Box::leak(vec![0; 1024].into_boxed_slice()) -# } -let target_buffer = map_gpu_buffer_for_write(); -let mut writer = std430::Writer::new(target_buffer); - -let light_count = lights.len() as u32; -writer.write(&light_count)?; - -// Crevice will automatically insert the required padding to align the -// PointLight structure correctly. In this case, there will be 12 bytes of -// padding between the length field and the light list. - -writer.write(lights.as_slice())?; - -# fn unmap_gpu_buffer() {} -unmap_gpu_buffer(); - -# Ok::<(), std::io::Error>(()) -``` -*/ -pub struct Writer { - writer: W, - offset: usize, -} - -impl Writer { - /// Create a new `Writer`, wrapping a buffer, file, or other type that - /// implements [`std::io::Write`]. - pub fn new(writer: W) -> Self { - Self { writer, offset: 0 } - } - - /// Write a new value to the underlying buffer, writing zeroed padding where - /// necessary. - /// - /// Returns the offset into the buffer that the value was written to. 
- pub fn write(&mut self, value: &T) -> io::Result - where - T: WriteStd430 + ?Sized, - { - value.write_std430(self) - } - - /// Write an iterator of values to the underlying buffer. - /// - /// Returns the offset into the buffer that the first value was written to. - /// If no values were written, returns the `len()`. - pub fn write_iter(&mut self, iter: I) -> io::Result - where - I: IntoIterator, - T: WriteStd430, - { - let mut offset = self.offset; - - let mut iter = iter.into_iter(); - - if let Some(item) = iter.next() { - offset = item.write_std430(self)?; - } - - for item in iter { - item.write_std430(self)?; - } - - Ok(offset) - } - - /// Write an `Std430` type to the underlying buffer. - pub fn write_std430(&mut self, value: &T) -> io::Result - where - T: Std430, - { - let padding = align_offset(self.offset, T::ALIGNMENT); - - for _ in 0..padding { - self.writer.write_all(&[0])?; - } - self.offset += padding; - - let value = value.as_std430(); - self.writer.write_all(bytes_of(&value))?; - - let write_here = self.offset; - self.offset += size_of::(); - - Ok(write_here) - } - - /// Returns the amount of data written by this `Writer`. - pub fn len(&self) -> usize { - self.offset - } -} diff --git a/crates/bevy_crevice/src/util.rs b/crates/bevy_crevice/src/util.rs deleted file mode 100644 index 9c6c2a396450d..0000000000000 --- a/crates/bevy_crevice/src/util.rs +++ /dev/null @@ -1,97 +0,0 @@ -#![allow(unused_macros)] - -macro_rules! easy_impl { - ( $( $std_name:ident $imp_ty:ty { $($field:ident),* }, )* ) => { - $( - impl crate::std140::AsStd140 for $imp_ty { - type Output = crate::std140::$std_name; - - #[inline] - fn as_std140(&self) -> Self::Output { - crate::std140::$std_name { - $( - $field: self.$field.as_std140(), - )* - ..bytemuck::Zeroable::zeroed() - } - } - - #[inline] - fn from_std140(value: Self::Output) -> Self { - Self { - $( - $field: <_ as crate::std140::AsStd140>::from_std140(value.$field), - )* - } - } - } - - impl crate::std430::AsStd430 for $imp_ty { - type Output = crate::std430::$std_name; - - #[inline] - fn as_std430(&self) -> Self::Output { - crate::std430::$std_name { - $( - $field: self.$field.as_std430(), - )* - ..bytemuck::Zeroable::zeroed() - } - } - - #[inline] - fn from_std430(value: Self::Output) -> Self { - Self { - $( - $field: <_ as crate::std430::AsStd430>::from_std430(value.$field), - )* - } - } - } - - unsafe impl crate::glsl::Glsl for $imp_ty { - const NAME: &'static str = crate::std140::$std_name::NAME; - } - )* - }; -} - -macro_rules! 
minty_impl { - ( $( $mint_ty:ty => $imp_ty:ty, )* ) => { - $( - impl crate::std140::AsStd140 for $imp_ty { - type Output = <$mint_ty as crate::std140::AsStd140>::Output; - - #[inline] - fn as_std140(&self) -> Self::Output { - let mint: $mint_ty = (*self).into(); - mint.as_std140() - } - - #[inline] - fn from_std140(value: Self::Output) -> Self { - <$mint_ty>::from_std140(value).into() - } - } - - impl crate::std430::AsStd430 for $imp_ty { - type Output = <$mint_ty as crate::std430::AsStd430>::Output; - - #[inline] - fn as_std430(&self) -> Self::Output { - let mint: $mint_ty = (*self).into(); - mint.as_std430() - } - - #[inline] - fn from_std430(value: Self::Output) -> Self { - <$mint_ty>::from_std430(value).into() - } - } - - unsafe impl crate::glsl::Glsl for $imp_ty { - const NAME: &'static str = <$mint_ty>::NAME; - } - )* - }; -} diff --git a/crates/bevy_crevice/tests/snapshots/test__generate_struct_array_glsl.snap b/crates/bevy_crevice/tests/snapshots/test__generate_struct_array_glsl.snap deleted file mode 100644 index 7829bd64ca141..0000000000000 --- a/crates/bevy_crevice/tests/snapshots/test__generate_struct_array_glsl.snap +++ /dev/null @@ -1,8 +0,0 @@ ---- -source: tests/test.rs -expression: "TestGlsl::glsl_definition()" - ---- -struct TestGlsl { - vec3 foo[8][4]; -}; diff --git a/crates/bevy_crevice/tests/snapshots/test__generate_struct_glsl.snap b/crates/bevy_crevice/tests/snapshots/test__generate_struct_glsl.snap deleted file mode 100644 index 42fc1f4cd770e..0000000000000 --- a/crates/bevy_crevice/tests/snapshots/test__generate_struct_glsl.snap +++ /dev/null @@ -1,9 +0,0 @@ ---- -source: tests/test.rs -expression: "TestGlsl::glsl_definition()" - ---- -struct TestGlsl { - vec3 foo; - mat2 bar; -}; diff --git a/crates/bevy_crevice/tests/test.rs b/crates/bevy_crevice/tests/test.rs deleted file mode 100644 index 693ce080c7f12..0000000000000 --- a/crates/bevy_crevice/tests/test.rs +++ /dev/null @@ -1,61 +0,0 @@ -use bevy_crevice::glsl::GlslStruct; -use bevy_crevice::std140::AsStd140; - -#[test] -fn there_and_back_again() { - #[derive(AsStd140, Debug, PartialEq)] - struct ThereAndBackAgain { - view: mint::ColumnMatrix3, - origin: mint::Vector3, - } - - let x = ThereAndBackAgain { - view: mint::ColumnMatrix3 { - x: mint::Vector3 { - x: 1.0, - y: 0.0, - z: 0.0, - }, - y: mint::Vector3 { - x: 0.0, - y: 1.0, - z: 0.0, - }, - z: mint::Vector3 { - x: 0.0, - y: 0.0, - z: 1.0, - }, - }, - origin: mint::Vector3 { - x: 0.0, - y: 1.0, - z: 2.0, - }, - }; - let x_as = x.as_std140(); - assert_eq!(::from_std140(x_as), x); -} - -#[test] -fn generate_struct_glsl() { - #[allow(dead_code)] - #[derive(GlslStruct)] - struct TestGlsl { - foo: mint::Vector3, - bar: mint::ColumnMatrix2, - } - - insta::assert_display_snapshot!(TestGlsl::glsl_definition()); -} - -#[test] -fn generate_struct_array_glsl() { - #[allow(dead_code)] - #[derive(GlslStruct)] - struct TestGlsl { - foo: [[mint::Vector3; 8]; 4], - } - - insta::assert_display_snapshot!(TestGlsl::glsl_definition()); -} diff --git a/crates/bevy_diagnostic/Cargo.toml b/crates/bevy_diagnostic/Cargo.toml index de94e29af557b..5bdd7236e490b 100644 --- a/crates/bevy_diagnostic/Cargo.toml +++ b/crates/bevy_diagnostic/Cargo.toml @@ -12,7 +12,7 @@ keywords = ["bevy"] [dependencies] # bevy bevy_app = { path = "../bevy_app", version = "0.8.0-dev" } -bevy_core = { path = "../bevy_core", version = "0.8.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } bevy_log = { path = "../bevy_log", version = "0.8.0-dev" } +bevy_time = { path = "../bevy_time", 
version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } diff --git a/crates/bevy_diagnostic/src/diagnostic.rs b/crates/bevy_diagnostic/src/diagnostic.rs index ba8d1a0c041bf..f53e04307e869 100644 --- a/crates/bevy_diagnostic/src/diagnostic.rs +++ b/crates/bevy_diagnostic/src/diagnostic.rs @@ -37,22 +37,30 @@ pub struct Diagnostic { history: VecDeque, sum: f64, max_history_length: usize, + pub is_enabled: bool, } impl Diagnostic { + /// Add a new value as a [`DiagnosticMeasurement`]. Its timestamp will be [`Instant::now`]. pub fn add_measurement(&mut self, value: f64) { let time = Instant::now(); - if self.history.len() == self.max_history_length { - if let Some(removed_diagnostic) = self.history.pop_front() { - self.sum -= removed_diagnostic.value; + if self.max_history_length > 1 { + if self.history.len() == self.max_history_length { + if let Some(removed_diagnostic) = self.history.pop_front() { + self.sum -= removed_diagnostic.value; + } } - } - self.sum += value; + self.sum += value; + } else { + self.history.clear(); + self.sum = value; + } self.history .push_back(DiagnosticMeasurement { time, value }); } + /// Create a new diagnostic with the given ID, name and maximum history. pub fn new( id: DiagnosticId, name: impl Into>, @@ -74,28 +82,30 @@ impl Diagnostic { history: VecDeque::with_capacity(max_history_length), max_history_length, sum: 0.0, + is_enabled: true, } } + /// Add a suffix to use when logging the value, can be used to show a unit. #[must_use] pub fn with_suffix(mut self, suffix: impl Into>) -> Self { self.suffix = suffix.into(); self } + /// Get the latest measurement from this diagnostic. #[inline] pub fn measurement(&self) -> Option<&DiagnosticMeasurement> { self.history.back() } + /// Get the latest value from this diagnostic. pub fn value(&self) -> Option { self.measurement().map(|measurement| measurement.value) } - pub fn sum(&self) -> f64 { - self.sum - } - + /// Return the mean (average) of this diagnostic's values. + /// N.B. this a cheap operation as the sum is cached. pub fn average(&self) -> Option { if !self.history.is_empty() { Some(self.sum / self.history.len() as f64) @@ -104,10 +114,12 @@ impl Diagnostic { } } + /// Return the number of elements for this diagnostic. pub fn history_len(&self) -> usize { self.history.len() } + /// Return the duration between the oldest and most recent values for this diagnostic. pub fn duration(&self) -> Option { if self.history.len() < 2 { return None; @@ -122,6 +134,7 @@ impl Diagnostic { None } + /// Return the maximum number of elements for this diagnostic. pub fn get_max_history_length(&self) -> usize { self.max_history_length } @@ -133,6 +146,11 @@ impl Diagnostic { pub fn measurements(&self) -> impl Iterator { self.history.iter() } + + /// Clear the history of this diagnostic. + pub fn clear_history(&mut self) { + self.history.clear(); + } } /// A collection of [Diagnostic]s @@ -144,6 +162,7 @@ pub struct Diagnostics { } impl Diagnostics { + /// Add a new [`Diagnostic`]. pub fn add(&mut self, diagnostic: Diagnostic) { self.diagnostics.insert(diagnostic.id, diagnostic); } @@ -156,18 +175,31 @@ impl Diagnostics { self.diagnostics.get_mut(&id) } + /// Get the latest [`DiagnosticMeasurement`] from an enabled [`Diagnostic`]. 
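
A short sketch of the reworked `Diagnostic` API in this hunk: the public `is_enabled` flag, `clear_history`, and the documented construction helpers. The diagnostic ID below is hypothetical, and `DiagnosticId::from_u128` is assumed to be available as it is for the built-in plugins:

```rust
use bevy_diagnostic::{Diagnostic, DiagnosticId};

// Hypothetical ID, for illustration only.
const EXAMPLE_DIAGNOSTIC: DiagnosticId =
    DiagnosticId::from_u128(0x4f0b9a5e3c6d4b218f2e1a7c5d3e9b01);

fn example() {
    let mut diagnostic = Diagnostic::new(EXAMPLE_DIAGNOSTIC, "example", 20).with_suffix("ms");

    diagnostic.add_measurement(16.6);
    diagnostic.add_measurement(16.8);
    assert_eq!(diagnostic.history_len(), 2);
    assert!(diagnostic.average().is_some());

    // New in this change: diagnostics can be disabled and their history cleared.
    diagnostic.is_enabled = false;
    diagnostic.clear_history();
    assert_eq!(diagnostic.history_len(), 0);
}
```
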
pub fn get_measurement(&self, id: DiagnosticId) -> Option<&DiagnosticMeasurement> { self.diagnostics .get(&id) + .filter(|diagnostic| diagnostic.is_enabled) .and_then(|diagnostic| diagnostic.measurement()) } - pub fn add_measurement(&mut self, id: DiagnosticId, value: f64) { - if let Some(diagnostic) = self.diagnostics.get_mut(&id) { - diagnostic.add_measurement(value); + /// Add a measurement to an enabled [`Diagnostic`]. The measurement is passed as a function so that + /// it will be evaluated only if the [`Diagnostic`] is enabled. This can be useful if the value is + /// costly to calculate. + pub fn add_measurement(&mut self, id: DiagnosticId, value: F) + where + F: FnOnce() -> f64, + { + if let Some(diagnostic) = self + .diagnostics + .get_mut(&id) + .filter(|diagnostic| diagnostic.is_enabled) + { + diagnostic.add_measurement(value()); } } + /// Return an iterator over all [`Diagnostic`]. pub fn iter(&self) -> impl Iterator { self.diagnostics.values() } diff --git a/crates/bevy_diagnostic/src/entity_count_diagnostics_plugin.rs b/crates/bevy_diagnostic/src/entity_count_diagnostics_plugin.rs index add37dee3c556..94e10cb37dbfb 100644 --- a/crates/bevy_diagnostic/src/entity_count_diagnostics_plugin.rs +++ b/crates/bevy_diagnostic/src/entity_count_diagnostics_plugin.rs @@ -1,8 +1,5 @@ use bevy_app::{App, Plugin}; -use bevy_ecs::{ - system::{IntoExclusiveSystem, ResMut}, - world::World, -}; +use bevy_ecs::{entity::Entities, system::ResMut}; use crate::{Diagnostic, DiagnosticId, Diagnostics}; @@ -13,7 +10,7 @@ pub struct EntityCountDiagnosticsPlugin; impl Plugin for EntityCountDiagnosticsPlugin { fn build(&self, app: &mut App) { app.add_startup_system(Self::setup_system) - .add_system(Self::diagnostic_system.exclusive_system()); + .add_system(Self::diagnostic_system); } } @@ -25,10 +22,7 @@ impl EntityCountDiagnosticsPlugin { diagnostics.add(Diagnostic::new(Self::ENTITY_COUNT, "entity_count", 20)); } - pub fn diagnostic_system(world: &mut World) { - let entity_count = world.entities().len(); - if let Some(mut diagnostics) = world.get_resource_mut::() { - diagnostics.add_measurement(Self::ENTITY_COUNT, entity_count as f64); - } + pub fn diagnostic_system(mut diagnostics: ResMut, entities: &Entities) { + diagnostics.add_measurement(Self::ENTITY_COUNT, || entities.len() as f64); } } diff --git a/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs b/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs index e34f431e00e4d..bc07eaaad6887 100644 --- a/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs +++ b/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs @@ -1,7 +1,7 @@ use crate::{Diagnostic, DiagnosticId, Diagnostics}; use bevy_app::prelude::*; -use bevy_core::Time; use bevy_ecs::system::{Res, ResMut}; +use bevy_time::Time; /// Adds "frame time" diagnostic to an App, specifically "frame time", "fps" and "frame count" #[derive(Default)] @@ -37,29 +37,23 @@ impl FrameTimeDiagnosticsPlugin { time: Res> + // + // If we were to unconditionally add `$name`'s `with`/`without` accesses then `AnyOf<(&A, ())>` + // would have a `With` access which is incorrect as this `WorldQuery` will match entities that + // do not have the `A` component. This is the same logic as the `Or<...>: WorldQuery` impl. + // + // The correct thing to do here is to only add a `with`/`without` access to `_access` if all + // `$name` params have that `with`/`without` access. More jargony put- we add the intersection + // of all `with`/`without` accesses of the `$name` params to `_access`. 
+ let mut _intersected_access = _access.clone(); + let mut _not_first = false; + $( + if _not_first { + let mut intermediate = _access.clone(); + $name::update_component_access($name, &mut intermediate); + _intersected_access.extend_intersect_filter(&intermediate); + _intersected_access.extend_access(&intermediate); + } else { + + $name::update_component_access($name, &mut _intersected_access); + _not_first = true; + } + )* + + *_access = _intersected_access; } - fn update_archetype_component_access(&self, _archetype: &Archetype, _access: &mut Access) { - let ($($name,)*) = &self.0; + fn update_archetype_component_access(state: &Self::State, _archetype: &Archetype, _access: &mut Access) { + let ($($name,)*) = &state.0; $( - if $name.matches_archetype(_archetype) { - $name.update_archetype_component_access(_archetype, _access); + if $name.matches_component_set(&|id| _archetype.contains(id)) { + $name::update_archetype_component_access($name, _archetype, _access); } )* } + } - fn matches_archetype(&self, _archetype: &Archetype) -> bool { - let ($($name,)*) = &self.0; - false $(|| $name.matches_archetype(_archetype))* + #[allow(non_snake_case)] + #[allow(clippy::unused_unit)] + impl<$($name: FetchState),*> FetchState for AnyOf<($($name,)*)> { + fn init(_world: &mut World) -> Self { + AnyOf(($($name::init(_world),)*)) } - fn matches_table(&self, _table: &Table) -> bool { + fn matches_component_set(&self, _set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { let ($($name,)*) = &self.0; - false $(|| $name.matches_table(_table))* + false $(|| $name.matches_component_set(_set_contains_id))* } } #[allow(non_snake_case)] #[allow(clippy::unused_unit)] - impl<$($name: WorldQuery),*> WorldQuery for AnyOf<($($name,)*)> { + // SAFETY: defers to soundness of `$name: WorldQuery` impl + unsafe impl<$($name: WorldQuery),*> WorldQuery for AnyOf<($($name,)*)> { + type ReadOnly = AnyOf<($($name::ReadOnly,)*)>; type State = AnyOf<($($name::State,)*)>; fn shrink<'wlong: 'wshort, 'wshort>(item: QueryItem<'wlong, Self>) -> QueryItem<'wshort, Self> { @@ -1697,7 +1599,7 @@ macro_rules! 
impl_anytuple_fetch { } /// SAFETY: each item in the tuple is read only - unsafe impl<'w, $($name: ReadOnlyFetch),*> ReadOnlyFetch for AnyOf<($(($name, bool),)*)> {} + unsafe impl<$($name: ReadOnlyWorldQuery),*> ReadOnlyWorldQuery for AnyOf<($($name,)*)> {} }; } @@ -1712,7 +1614,8 @@ pub struct NopFetch { state: PhantomData, } -impl<'w, State: FetchState> Fetch<'w> for NopFetch { +// SAFETY: NopFetch doesnt access anything +unsafe impl<'w, State: FetchState> Fetch<'w> for NopFetch { type Item = (); type State = State; @@ -1747,4 +1650,13 @@ impl<'w, State: FetchState> Fetch<'w> for NopFetch { #[inline(always)] unsafe fn table_fetch(&mut self, _table_row: usize) -> Self::Item {} + + fn update_component_access(_state: &Self::State, _access: &mut FilteredAccess) {} + + fn update_archetype_component_access( + _state: &Self::State, + _archetype: &Archetype, + _access: &mut Access, + ) { + } } diff --git a/crates/bevy_ecs/src/query/filter.rs b/crates/bevy_ecs/src/query/filter.rs index 63c83f6350b1a..1626dd43fc013 100644 --- a/crates/bevy_ecs/src/query/filter.rs +++ b/crates/bevy_ecs/src/query/filter.rs @@ -4,7 +4,7 @@ use crate::{ entity::Entity, query::{ debug_checked_unreachable, Access, Fetch, FetchState, FilteredAccess, QueryFetch, - ROQueryFetch, WorldQuery, WorldQueryGats, + WorldQuery, WorldQueryGats, }, storage::{ComponentSparseSet, Table, Tables}, world::World, @@ -13,7 +13,7 @@ use bevy_ecs_macros::all_tuples; use bevy_ptr::{ThinSlicePtr, UnsafeCellDeref}; use std::{cell::UnsafeCell, marker::PhantomData}; -use super::ReadOnlyFetch; +use super::ReadOnlyWorldQuery; /// Filter that selects entities with a component `T`. /// @@ -44,9 +44,12 @@ use super::ReadOnlyFetch; /// ``` pub struct With(PhantomData); -impl WorldQuery for With { +// SAFETY: `ROQueryFetch` is the same as `QueryFetch` +unsafe impl WorldQuery for With { + type ReadOnly = Self; type State = WithState; + #[allow(clippy::semicolon_if_nothing_returned)] fn shrink<'wlong: 'wshort, 'wshort>( item: super::QueryItem<'wlong, Self>, ) -> super::QueryItem<'wshort, Self> { @@ -67,8 +70,7 @@ pub struct WithState { marker: PhantomData, } -// SAFETY: no component access or archetype component access -unsafe impl FetchState for WithState { +impl FetchState for WithState { fn init(world: &mut World) -> Self { let component_id = world.init_component::(); Self { @@ -77,35 +79,18 @@ unsafe impl FetchState for WithState { } } - #[inline] - fn update_component_access(&self, access: &mut FilteredAccess) { - access.add_with(self.component_id); - } - - #[inline] - fn update_archetype_component_access( - &self, - _archetype: &Archetype, - _access: &mut Access, - ) { - } - - fn matches_archetype(&self, archetype: &Archetype) -> bool { - archetype.contains(self.component_id) - } - - fn matches_table(&self, table: &Table) -> bool { - table.has_column(self.component_id) + fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { + set_contains_id(self.component_id) } } impl WorldQueryGats<'_> for With { type Fetch = WithFetch; - type ReadOnlyFetch = WithFetch; type _State = WithState; } -impl<'w, T: Component> Fetch<'w> for WithFetch { +// SAFETY: no component access or archetype component access +unsafe impl<'w, T: Component> Fetch<'w> for WithFetch { type Item = (); type State = WithState; @@ -146,10 +131,23 @@ impl<'w, T: Component> Fetch<'w> for WithFetch { #[inline] unsafe fn table_fetch(&mut self, _table_row: usize) {} + + #[inline] + fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { 
+ access.add_with(state.component_id); + } + + #[inline] + fn update_archetype_component_access( + _state: &Self::State, + _archetype: &Archetype, + _access: &mut Access, + ) { + } } // SAFETY: no component access or archetype component access -unsafe impl ReadOnlyFetch for WithFetch {} +unsafe impl ReadOnlyWorldQuery for With {} impl Clone for WithFetch { fn clone(&self) -> Self { @@ -187,9 +185,12 @@ impl Copy for WithFetch {} /// ``` pub struct Without(PhantomData); -impl WorldQuery for Without { +// SAFETY: `ROQueryFetch` is the same as `QueryFetch` +unsafe impl WorldQuery for Without { + type ReadOnly = Self; type State = WithoutState; + #[allow(clippy::semicolon_if_nothing_returned)] fn shrink<'wlong: 'wshort, 'wshort>( item: super::QueryItem<'wlong, Self>, ) -> super::QueryItem<'wshort, Self> { @@ -210,8 +211,7 @@ pub struct WithoutState { marker: PhantomData, } -// SAFETY: no component access or archetype component access -unsafe impl FetchState for WithoutState { +impl FetchState for WithoutState { fn init(world: &mut World) -> Self { let component_id = world.init_component::(); Self { @@ -220,35 +220,18 @@ unsafe impl FetchState for WithoutState { } } - #[inline] - fn update_component_access(&self, access: &mut FilteredAccess) { - access.add_without(self.component_id); - } - - #[inline] - fn update_archetype_component_access( - &self, - _archetype: &Archetype, - _access: &mut Access, - ) { - } - - fn matches_archetype(&self, archetype: &Archetype) -> bool { - !archetype.contains(self.component_id) - } - - fn matches_table(&self, table: &Table) -> bool { - !table.has_column(self.component_id) + fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { + !set_contains_id(self.component_id) } } impl WorldQueryGats<'_> for Without { type Fetch = WithoutFetch; - type ReadOnlyFetch = WithoutFetch; type _State = WithoutState; } -impl<'w, T: Component> Fetch<'w> for WithoutFetch { +// SAFETY: no component access or archetype component access +unsafe impl<'w, T: Component> Fetch<'w> for WithoutFetch { type Item = (); type State = WithoutState; @@ -289,10 +272,23 @@ impl<'w, T: Component> Fetch<'w> for WithoutFetch { #[inline] unsafe fn table_fetch(&mut self, _table_row: usize) {} + + #[inline] + fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { + access.add_without(state.component_id); + } + + #[inline] + fn update_archetype_component_access( + _state: &Self::State, + _archetype: &Archetype, + _access: &mut Access, + ) { + } } // SAFETY: no component access or archetype component access -unsafe impl ReadOnlyFetch for WithoutFetch {} +unsafe impl ReadOnlyWorldQuery for Without {} impl Clone for WithoutFetch { fn clone(&self) -> Self { @@ -350,7 +346,9 @@ macro_rules! impl_query_filter_tuple { ($(($filter: ident, $state: ident)),*) => { #[allow(unused_variables)] #[allow(non_snake_case)] - impl<$($filter: WorldQuery),*> WorldQuery for Or<($($filter,)*)> { + // SAFETY: defers to soundness of `$filter: WorldQuery` impl + unsafe impl<$($filter: WorldQuery),*> WorldQuery for Or<($($filter,)*)> { + type ReadOnly = Or<($($filter::ReadOnly,)*)>; type State = Or<($($filter::State,)*)>; fn shrink<'wlong: 'wshort, 'wshort>(item: super::QueryItem<'wlong, Self>) -> super::QueryItem<'wshort, Self> { @@ -362,13 +360,13 @@ macro_rules! 
impl_query_filter_tuple { #[allow(non_snake_case)] impl<'w, $($filter: WorldQueryGats<'w>),*> WorldQueryGats<'w> for Or<($($filter,)*)> { type Fetch = Or<($(OrFetch<'w, QueryFetch<'w, $filter>>,)*)>; - type ReadOnlyFetch = Or<($(OrFetch<'w, ROQueryFetch<'w, $filter>>,)*)>; type _State = Or<($($filter::_State,)*)>; } #[allow(unused_variables)] #[allow(non_snake_case)] - impl<'w, $($filter: Fetch<'w>),*> Fetch<'w> for Or<($(OrFetch<'w, $filter>,)*)> { + // SAFETY: update_component_access and update_archetype_component_access are called for each item in the tuple + unsafe impl<'w, $($filter: Fetch<'w>),*> Fetch<'w> for Or<($(OrFetch<'w, $filter>,)*)> { type State = Or<($(<$filter as Fetch<'w>>::State,)*)>; type Item = bool; @@ -390,7 +388,7 @@ macro_rules! impl_query_filter_tuple { let ($($filter,)*) = &mut self.0; let ($($state,)*) = &state.0; $( - $filter.matches = $state.matches_table(table); + $filter.matches = $state.matches_component_set(&|id| table.has_column(id)); if $filter.matches { $filter.fetch.set_table($state, table); } @@ -402,7 +400,7 @@ macro_rules! impl_query_filter_tuple { let ($($filter,)*) = &mut self.0; let ($($state,)*) = &state.0; $( - $filter.matches = $state.matches_archetype(archetype); + $filter.matches = $state.matches_component_set(&|id| archetype.contains(id)); if $filter.matches { $filter.fetch.set_archetype($state, archetype, tables); } @@ -430,39 +428,60 @@ macro_rules! impl_query_filter_tuple { unsafe fn archetype_filter_fetch(&mut self, archetype_index: usize) -> bool { self.archetype_fetch(archetype_index) } - } - // SAFETY: update_component_access and update_archetype_component_access are called for each item in the tuple - #[allow(unused_variables)] - #[allow(non_snake_case)] - unsafe impl<$($filter: FetchState),*> FetchState for Or<($($filter,)*)> { - fn init(world: &mut World) -> Self { - Or(($($filter::init(world),)*)) - } + fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { + let ($($filter,)*) = &state.0; - fn update_component_access(&self, access: &mut FilteredAccess) { - let ($($filter,)*) = &self.0; - $($filter.update_component_access(access);)* + // We do not unconditionally add `$filter`'s `with`/`without` accesses to `access` + // as this would be unsound. For example the following two queries should conflict: + // - Query<&mut B, Or<(With, ())>> + // - Query<&mut B, Without> + // + // If we were to unconditionally add `$name`'s `with`/`without` accesses then `Or<(With, ())>` + // would have a `With` access which is incorrect as this `WorldQuery` will match entities that + // do not have the `A` component. This is the same logic as the `AnyOf<...>: WorldQuery` impl. + // + // The correct thing to do here is to only add a `with`/`without` access to `_access` if all + // `$filter` params have that `with`/`without` access. More jargony put- we add the intersection + // of all `with`/`without` accesses of the `$filter` params to `access`. 
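
To make the conflict described in the comment above concrete, a sketch with hypothetical components `A` and `B`; with the intersection logic that follows, these two queries are reported as conflicting on `B`, so adding a system like this to an `App` panics when its parameters are initialized instead of silently aliasing `&mut B`:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A;

#[derive(Component)]
struct B;

// `Or<(With<A>, ())>` also matches entities without `A`, so both queries can reach
// the same `B` mutably; the access checker must therefore treat them as conflicting.
fn would_alias_b(
    _with_or: Query<&mut B, Or<(With<A>, ())>>,
    _without: Query<&mut B, Without<A>>,
) {
}
```
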
+ let mut _intersected_access = access.clone(); + let mut _not_first = false; + $( + if _not_first { + let mut intermediate = access.clone(); + $filter::update_component_access($filter, &mut intermediate); + _intersected_access.extend_intersect_filter(&intermediate); + _intersected_access.extend_access(&intermediate); + } else { + $filter::update_component_access($filter, &mut _intersected_access); + _not_first = true; + } + )* + + *access = _intersected_access; } - fn update_archetype_component_access(&self, archetype: &Archetype, access: &mut Access) { - let ($($filter,)*) = &self.0; - $($filter.update_archetype_component_access(archetype, access);)* + fn update_archetype_component_access(state: &Self::State, archetype: &Archetype, access: &mut Access) { + let ($($filter,)*) = &state.0; + $($filter::update_archetype_component_access($filter, archetype, access);)* } + } - fn matches_archetype(&self, archetype: &Archetype) -> bool { - let ($($filter,)*) = &self.0; - false $(|| $filter.matches_archetype(archetype))* + #[allow(unused_variables)] + #[allow(non_snake_case)] + impl<$($filter: FetchState),*> FetchState for Or<($($filter,)*)> { + fn init(world: &mut World) -> Self { + Or(($($filter::init(world),)*)) } - fn matches_table(&self, table: &Table) -> bool { + fn matches_component_set(&self, _set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { let ($($filter,)*) = &self.0; - false $(|| $filter.matches_table(table))* + false $(|| $filter.matches_component_set(_set_contains_id))* } } // SAFE: filters are read only - unsafe impl<'w, $($filter: Fetch<'w> + ReadOnlyFetch),*> ReadOnlyFetch for Or<($(OrFetch<'w, $filter>,)*)> {} + unsafe impl<$($filter: ReadOnlyWorldQuery),*> ReadOnlyWorldQuery for Or<($($filter,)*)> {} }; } @@ -500,7 +519,9 @@ macro_rules! impl_tick_filter { marker: PhantomData, } - impl WorldQuery for $name { + // SAFETY: `ROQueryFetch` is the same as `QueryFetch` + unsafe impl WorldQuery for $name { + type ReadOnly = Self; type State = $state_name; fn shrink<'wlong: 'wshort, 'wshort>(item: super::QueryItem<'wlong, Self>) -> super::QueryItem<'wshort, Self> { @@ -508,8 +529,7 @@ macro_rules! impl_tick_filter { } } - // SAFETY: this reads the T component. archetype component access and component access are updated to reflect that - unsafe impl FetchState for $state_name { + impl FetchState for $state_name { fn init(world: &mut World) -> Self { Self { component_id: world.init_component::(), @@ -517,42 +537,18 @@ macro_rules! impl_tick_filter { } } - #[inline] - fn update_component_access(&self, access: &mut FilteredAccess) { - if access.access().has_write(self.component_id) { - panic!("$state_name<{}> conflicts with a previous access in this query. 
Shared access cannot coincide with exclusive access.", - std::any::type_name::()); - } - access.add_read(self.component_id); - } - - #[inline] - fn update_archetype_component_access( - &self, - archetype: &Archetype, - access: &mut Access, - ) { - if let Some(archetype_component_id) = archetype.get_archetype_component_id(self.component_id) { - access.add_read(archetype_component_id); - } - } - - fn matches_archetype(&self, archetype: &Archetype) -> bool { - archetype.contains(self.component_id) - } - - fn matches_table(&self, table: &Table) -> bool { - table.has_column(self.component_id) + fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { + set_contains_id(self.component_id) } } impl<'w, T: Component> WorldQueryGats<'w> for $name { type Fetch = $fetch_name<'w, T>; - type ReadOnlyFetch = $fetch_name<'w, T>; type _State = $state_name; } - impl<'w, T: Component> Fetch<'w> for $fetch_name<'w, T> { + // SAFETY: this reads the T component. archetype component access and component access are updated to reflect that + unsafe impl<'w, T: Component> Fetch<'w> for $fetch_name<'w, T> { type State = $state_name; type Item = bool; @@ -626,10 +622,30 @@ macro_rules! impl_tick_filter { unsafe fn archetype_filter_fetch(&mut self, archetype_index: usize) -> bool { self.archetype_fetch(archetype_index) } + + #[inline] + fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { + if access.access().has_write(state.component_id) { + panic!("$state_name<{}> conflicts with a previous access in this query. Shared access cannot coincide with exclusive access.", + std::any::type_name::()); + } + access.add_read(state.component_id); + } + + #[inline] + fn update_archetype_component_access( + state: &Self::State, + archetype: &Archetype, + access: &mut Access, + ) { + if let Some(archetype_component_id) = archetype.get_archetype_component_id(state.component_id) { + access.add_read(archetype_component_id); + } + } } /// SAFETY: read-only access - unsafe impl<'w, T: Component> ReadOnlyFetch for $fetch_name<'w, T> {} + unsafe impl ReadOnlyWorldQuery for $name {} impl Clone for $fetch_name<'_, T> { fn clone(&self) -> Self { diff --git a/crates/bevy_ecs/src/query/iter.rs b/crates/bevy_ecs/src/query/iter.rs index 25bc9720b2984..b3c304f231aa6 100644 --- a/crates/bevy_ecs/src/query/iter.rs +++ b/crates/bevy_ecs/src/query/iter.rs @@ -1,12 +1,13 @@ use crate::{ archetype::{ArchetypeId, Archetypes}, + entity::{Entities, Entity}, + prelude::World, query::{Fetch, QueryState, WorldQuery}, storage::{TableId, Tables}, - world::World, }; -use std::{marker::PhantomData, mem::MaybeUninit}; +use std::{borrow::Borrow, iter::FusedIterator, marker::PhantomData, mem::MaybeUninit}; -use super::{QueryFetch, QueryItem, ReadOnlyFetch}; +use super::{QueryFetch, QueryItem, ReadOnlyWorldQuery}; /// An [`Iterator`] over query results of a [`Query`](crate::system::Query). 
/// @@ -16,13 +17,7 @@ pub struct QueryIter<'w, 's, Q: WorldQuery, QF: Fetch<'w, State = Q::State>, F: tables: &'w Tables, archetypes: &'w Archetypes, query_state: &'s QueryState, - world: &'w World, - table_id_iter: std::slice::Iter<'s, TableId>, - archetype_id_iter: std::slice::Iter<'s, ArchetypeId>, - fetch: QF, - filter: QueryFetch<'w, F>, - current_len: usize, - current_index: usize, + cursor: QueryIterationCursor<'w, 's, Q, QF, F>, } impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> QueryIter<'w, 's, Q, QF, F> @@ -40,6 +35,88 @@ where last_change_tick: u32, change_tick: u32, ) -> Self { + QueryIter { + query_state, + tables: &world.storages().tables, + archetypes: &world.archetypes, + cursor: QueryIterationCursor::init(world, query_state, last_change_tick, change_tick), + } + } +} + +impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> Iterator for QueryIter<'w, 's, Q, QF, F> +where + QF: Fetch<'w, State = Q::State>, +{ + type Item = QF::Item; + + #[inline(always)] + fn next(&mut self) -> Option { + unsafe { + self.cursor + .next(self.tables, self.archetypes, self.query_state) + } + } + + fn size_hint(&self) -> (usize, Option) { + let max_size = self + .query_state + .matched_archetype_ids + .iter() + .map(|id| self.archetypes[*id].len()) + .sum(); + + let archetype_query = F::Fetch::IS_ARCHETYPAL && QF::IS_ARCHETYPAL; + let min_size = if archetype_query { max_size } else { 0 }; + (min_size, Some(max_size)) + } +} + +// This is correct as [`QueryIter`] always returns `None` once exhausted. +impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> FusedIterator for QueryIter<'w, 's, Q, QF, F> where + QF: Fetch<'w, State = Q::State> +{ +} + +/// An [`Iterator`] over query results of a [`Query`](crate::system::Query). +/// +/// This struct is created by the [`Query::iter_many`](crate::system::Query::iter_many) method. +pub struct QueryManyIter< + 'w, + 's, + Q: WorldQuery, + QF: Fetch<'w, State = Q::State>, + F: WorldQuery, + I: Iterator, +> where + I::Item: Borrow, +{ + entity_iter: I, + entities: &'w Entities, + tables: &'w Tables, + archetypes: &'w Archetypes, + fetch: QF, + filter: QueryFetch<'w, F>, + query_state: &'s QueryState, +} + +impl<'w, 's, Q: WorldQuery, QF: Fetch<'w, State = Q::State>, F: WorldQuery, I: Iterator> + QueryManyIter<'w, 's, Q, QF, F, I> +where + I::Item: Borrow, +{ + /// # Safety + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `query_state.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`](crate::world::WorldId) is unsound. 
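The `FusedIterator` impl for `QueryIter` above is a guarantee rather than new behaviour: once the cursor has walked every matched table or archetype, every further `next()` call keeps returning `None`. A minimal sketch of what that promises, assuming a hypothetical component `A`:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A(usize);

fn exhaust(query: Query<&A>) {
    let mut iter = query.iter();
    for a in iter.by_ref() {
        let _ = a.0;
    }
    // Guaranteed by `FusedIterator`: no explicit `.fuse()` is needed.
    assert!(iter.next().is_none());
}
```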
+ pub(crate) unsafe fn new>( + world: &'w World, + query_state: &'s QueryState, + entity_list: EntityList, + last_change_tick: u32, + change_tick: u32, + ) -> QueryManyIter<'w, 's, Q, QF, F, I> { let fetch = QF::init( world, &query_state.fetch_state, @@ -52,100 +129,59 @@ where last_change_tick, change_tick, ); - - QueryIter { - world, + QueryManyIter { query_state, - tables: &world.storages().tables, + entities: &world.entities, archetypes: &world.archetypes, + tables: &world.storages.tables, fetch, filter, - table_id_iter: query_state.matched_table_ids.iter(), - archetype_id_iter: query_state.matched_archetype_ids.iter(), - current_len: 0, - current_index: 0, + entity_iter: entity_list.into_iter(), } } } -impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> Iterator for QueryIter<'w, 's, Q, QF, F> +impl<'w, 's, Q: WorldQuery, QF: Fetch<'w, State = Q::State>, F: WorldQuery, I: Iterator> Iterator + for QueryManyIter<'w, 'w, Q, QF, F, I> where - QF: Fetch<'w, State = Q::State>, + I::Item: Borrow, { type Item = QF::Item; - // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual - // We can't currently reuse QueryIterationCursor in QueryIter for performance reasons. See #1763 for context. #[inline(always)] fn next(&mut self) -> Option { unsafe { - if QF::IS_DENSE && >::IS_DENSE { - loop { - if self.current_index == self.current_len { - let table_id = self.table_id_iter.next()?; - let table = &self.tables[*table_id]; - self.fetch.set_table(&self.query_state.fetch_state, table); - self.filter.set_table(&self.query_state.filter_state, table); - self.current_len = table.len(); - self.current_index = 0; - continue; - } - - if !self.filter.table_filter_fetch(self.current_index) { - self.current_index += 1; - continue; - } - - let item = self.fetch.table_fetch(self.current_index); - - self.current_index += 1; - return Some(item); + for entity in self.entity_iter.by_ref() { + let location = match self.entities.get(*entity.borrow()) { + Some(location) => location, + None => continue, + }; + + if !self + .query_state + .matched_archetypes + .contains(location.archetype_id.index()) + { + continue; } - } else { - loop { - if self.current_index == self.current_len { - let archetype_id = self.archetype_id_iter.next()?; - let archetype = &self.archetypes[*archetype_id]; - self.fetch.set_archetype( - &self.query_state.fetch_state, - archetype, - self.tables, - ); - self.filter.set_archetype( - &self.query_state.filter_state, - archetype, - self.tables, - ); - self.current_len = archetype.len(); - self.current_index = 0; - continue; - } - if !self.filter.archetype_filter_fetch(self.current_index) { - self.current_index += 1; - continue; - } + let archetype = &self.archetypes[location.archetype_id]; - let item = self.fetch.archetype_fetch(self.current_index); - self.current_index += 1; - return Some(item); + self.fetch + .set_archetype(&self.query_state.fetch_state, archetype, self.tables); + self.filter + .set_archetype(&self.query_state.filter_state, archetype, self.tables); + if self.filter.archetype_filter_fetch(location.index) { + return Some(self.fetch.archetype_fetch(location.index)); } } + None } } fn size_hint(&self) -> (usize, Option) { - let max_size = self - .query_state - .matched_archetypes - .ones() - .map(|index| self.world.archetypes[ArchetypeId::new(index)].len()) - .sum(); - - let archetype_query = F::Fetch::IS_ARCHETYPAL && 
QF::IS_ARCHETYPAL; - let min_size = if archetype_query { max_size } else { 0 }; - (min_size, Some(max_size)) + let (_, max_size) = self.entity_iter.size_hint(); + (0, max_size) } } @@ -153,7 +189,6 @@ pub struct QueryCombinationIter<'w, 's, Q: WorldQuery, F: WorldQuery, const K: u tables: &'w Tables, archetypes: &'w Archetypes, query_state: &'s QueryState, - world: &'w World, cursors: [QueryIterationCursor<'w, 's, Q, QueryFetch<'w, Q>, F>; K], } @@ -195,7 +230,6 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery, const K: usize> QueryCombinationIter< } QueryCombinationIter { - world, query_state, tables: &world.storages().tables, archetypes: &world.archetypes, @@ -242,7 +276,7 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery, const K: usize> QueryCombinationIter< let ptr = values.as_mut_ptr().cast::>(); for (offset, cursor) in self.cursors.iter_mut().enumerate() { - ptr.add(offset).write(cursor.peek_last().unwrap()) + ptr.add(offset).write(cursor.peek_last().unwrap()); } Some(values.assume_init()) @@ -268,11 +302,11 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery, const K: usize> QueryCombinationIter< // Iterator type is intentionally implemented only for read-only access. // Doing so for mutable references would be unsound, because calling `next` // multiple times would allow multiple owned references to the same data to exist. -impl<'w, 's, Q: WorldQuery, F: WorldQuery, const K: usize> Iterator +impl<'w, 's, Q: ReadOnlyWorldQuery, F: ReadOnlyWorldQuery, const K: usize> Iterator for QueryCombinationIter<'w, 's, Q, F, K> where - QueryFetch<'w, Q>: Clone + ReadOnlyFetch, - QueryFetch<'w, F>: Clone + ReadOnlyFetch, + QueryFetch<'w, Q>: Clone, + QueryFetch<'w, F>: Clone, { type Item = [QueryItem<'w, Q>; K]; @@ -289,9 +323,9 @@ where let max_size: usize = self .query_state - .matched_archetypes - .ones() - .map(|index| self.world.archetypes[ArchetypeId::new(index)].len()) + .matched_archetype_ids + .iter() + .map(|id| self.archetypes[*id].len()) .sum(); if max_size < K { @@ -324,13 +358,22 @@ where { fn len(&self) -> usize { self.query_state - .matched_archetypes - .ones() - .map(|index| self.world.archetypes[ArchetypeId::new(index)].len()) + .matched_archetype_ids + .iter() + .map(|id| self.archetypes[*id].len()) .sum() } } +// This is correct as [`QueryCombinationIter`] always returns `None` once exhausted. 
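From a system, this iterator is reached through `Query::iter_many`, mirroring the `many_entities` test added later in this patch. A minimal sketch, assuming hypothetical components `A` and `B`:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A(usize);

#[derive(Component)]
struct B(usize);

// Fetches `(&A, &B)` only for the entities yielded by the first query;
// ids that are despawned or don't match are skipped silently.
fn iter_selected(has_a: Query<Entity, With<A>>, has_a_and_b: Query<(&A, &B)>) {
    for (a, b) in has_a_and_b.iter_many(&has_a) {
        let _ = (a.0, b.0);
    }
}
```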
+impl<'w, 's, Q: ReadOnlyWorldQuery, F: ReadOnlyWorldQuery, const K: usize> FusedIterator + for QueryCombinationIter<'w, 's, Q, F, K> +where + QueryFetch<'w, Q>: Clone, + QueryFetch<'w, F>: Clone, +{ +} + struct QueryIterationCursor<'w, 's, Q: WorldQuery, QF: Fetch<'w, State = Q::State>, F: WorldQuery> { table_id_iter: std::slice::Iter<'s, TableId>, archetype_id_iter: std::slice::Iter<'s, ArchetypeId>, @@ -338,7 +381,7 @@ struct QueryIterationCursor<'w, 's, Q: WorldQuery, QF: Fetch<'w, State = Q::Stat filter: QueryFetch<'w, F>, current_len: usize, current_index: usize, - phantom: PhantomData<&'w Q>, + phantom: PhantomData<(&'w (), Q)>, } impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> Clone for QueryIterationCursor<'w, 's, Q, QF, F> @@ -363,6 +406,8 @@ impl<'w, 's, Q: WorldQuery, QF, F: WorldQuery> QueryIterationCursor<'w, 's, Q, Q where QF: Fetch<'w, State = Q::State>, { + const IS_DENSE: bool = QF::IS_DENSE && >::IS_DENSE; + unsafe fn init_empty( world: &'w World, query_state: &'s QueryState, @@ -409,7 +454,7 @@ where #[inline] unsafe fn peek_last(&mut self) -> Option { if self.current_index > 0 { - if QF::IS_DENSE && >::IS_DENSE { + if Self::IS_DENSE { Some(self.fetch.table_fetch(self.current_index - 1)) } else { Some(self.fetch.archetype_fetch(self.current_index - 1)) @@ -420,8 +465,7 @@ where } // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual - // We can't currently reuse QueryIterationCursor in QueryIter for performance reasons. See #1763 for context. + // QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual #[inline(always)] unsafe fn next( &mut self, @@ -429,7 +473,7 @@ where archetypes: &'w Archetypes, query_state: &'s QueryState, ) -> Option { - if QF::IS_DENSE && >::IS_DENSE { + if Self::IS_DENSE { loop { if self.current_index == self.current_len { let table_id = self.table_id_iter.next()?; diff --git a/crates/bevy_ecs/src/query/mod.rs b/crates/bevy_ecs/src/query/mod.rs index 0be28ee2cf9d1..857fe033ab1d4 100644 --- a/crates/bevy_ecs/src/query/mod.rs +++ b/crates/bevy_ecs/src/query/mod.rs @@ -21,6 +21,7 @@ unsafe fn debug_checked_unreachable() -> ! 
{ mod tests { use super::WorldQuery; use crate::prelude::{AnyOf, Entity, Or, With, Without}; + use crate::system::{IntoSystem, Query, System}; use crate::{self as bevy_ecs, component::Component, world::World}; use std::collections::HashSet; @@ -455,7 +456,7 @@ mod tests { ) }) .collect::>(); - assert_eq!(custom_param_data, normal_data) + assert_eq!(custom_param_data, normal_data); } { @@ -516,4 +517,44 @@ mod tests { assert_eq!(custom_param_entities, normal_entities); } } + + #[test] + fn many_entities() { + let mut world = World::new(); + world.spawn().insert_bundle((A(0), B(0))); + world.spawn().insert_bundle((A(0), B(0))); + world.spawn().insert(A(0)); + world.spawn().insert(B(0)); + { + fn system(has_a: Query>, has_a_and_b: Query<(&A, &B)>) { + assert_eq!(has_a_and_b.iter_many(&has_a).count(), 2); + } + let mut system = IntoSystem::into_system(system); + system.initialize(&mut world); + system.run((), &mut world); + } + { + fn system(has_a: Query>, mut b_query: Query<&mut B>) { + b_query.many_for_each_mut(&has_a, |mut b| { + b.0 = 1; + }); + } + let mut system = IntoSystem::into_system(system); + system.initialize(&mut world); + system.run((), &mut world); + } + { + fn system(query: Query<(Option<&A>, &B)>) { + for (maybe_a, b) in &query { + match maybe_a { + Some(_) => assert_eq!(b.0, 1), + None => assert_eq!(b.0, 0), + } + } + } + let mut system = IntoSystem::into_system(system); + system.initialize(&mut world); + system.run((), &mut world); + } + } } diff --git a/crates/bevy_ecs/src/query/state.rs b/crates/bevy_ecs/src/query/state.rs index ae23c1c512e8e..cc769886b3c70 100644 --- a/crates/bevy_ecs/src/query/state.rs +++ b/crates/bevy_ecs/src/query/state.rs @@ -10,13 +10,13 @@ use crate::{ storage::TableId, world::{World, WorldId}, }; -use bevy_tasks::TaskPool; +use bevy_tasks::ComputeTaskPool; #[cfg(feature = "trace")] use bevy_utils::tracing::Instrument; use fixedbitset::FixedBitSet; -use std::fmt; +use std::{borrow::Borrow, fmt}; -use super::{QueryFetch, QueryItem, ROQueryFetch, ROQueryItem}; +use super::{QueryFetch, QueryItem, QueryManyIter, ROQueryFetch, ROQueryItem}; /// Provides scoped access to a [`World`] state according to a given [`WorldQuery`] and query filter. pub struct QueryState { @@ -47,13 +47,16 @@ impl QueryState { let filter_state = ::init(world); let mut component_access = FilteredAccess::default(); - fetch_state.update_component_access(&mut component_access); + QueryFetch::<'static, Q>::update_component_access(&fetch_state, &mut component_access); // Use a temporary empty FilteredAccess for filters. This prevents them from conflicting with the // main Query's `fetch_state` access. Filters are allowed to conflict with the main query fetch // because they are evaluated *before* a specific reference is constructed. let mut filter_component_access = FilteredAccess::default(); - filter_state.update_component_access(&mut filter_component_access); + QueryFetch::<'static, F>::update_component_access( + &filter_state, + &mut filter_component_access, + ); // Merge the temporary filter access with the main access. This ensures that filter access is // properly considered in a global "cross-query" context (both within systems and across systems). @@ -115,13 +118,23 @@ impl QueryState { /// Creates a new [`Archetype`]. 
pub fn new_archetype(&mut self, archetype: &Archetype) { - if self.fetch_state.matches_archetype(archetype) - && self.filter_state.matches_archetype(archetype) + if self + .fetch_state + .matches_component_set(&|id| archetype.contains(id)) + && self + .filter_state + .matches_component_set(&|id| archetype.contains(id)) { - self.fetch_state - .update_archetype_component_access(archetype, &mut self.archetype_component_access); - self.filter_state - .update_archetype_component_access(archetype, &mut self.archetype_component_access); + QueryFetch::<'static, Q>::update_archetype_component_access( + &self.fetch_state, + archetype, + &mut self.archetype_component_access, + ); + QueryFetch::<'static, F>::update_archetype_component_access( + &self.filter_state, + archetype, + &mut self.archetype_component_access, + ); let archetype_index = archetype.id().index(); if !self.matched_archetypes.contains(archetype_index) { self.matched_archetypes.grow(archetype_index + 1); @@ -548,6 +561,32 @@ impl QueryState { } } + /// Returns an [`Iterator`] over the query results of a list of [`Entity`]'s. + /// + /// This can only return immutable data (mutable data will be cast to an immutable form). + /// See [`Self::many_for_each_mut`] for queries that contain at least one mutable component. + /// + #[inline] + pub fn iter_many<'w, 's, EntityList: IntoIterator>( + &'s mut self, + world: &'w World, + entities: EntityList, + ) -> QueryManyIter<'w, 's, Q, ROQueryFetch<'w, Q>, F, EntityList::IntoIter> + where + EntityList::Item: Borrow, + { + // SAFETY: query is read only + unsafe { + self.update_archetypes(world); + self.iter_many_unchecked_manual( + entities, + world, + world.last_change_tick(), + world.read_change_tick(), + ) + } + } + /// Returns an [`Iterator`] over the query results for the given [`World`]. /// /// # Safety @@ -603,6 +642,35 @@ impl QueryState { QueryIter::new(world, self, last_change_tick, change_tick) } + /// Returns an [`Iterator`] for the given [`World`] and list of [`Entity`]'s, where the last change and + /// the current change tick are given. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// this does not check for entity uniqueness + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + #[inline] + pub(crate) unsafe fn iter_many_unchecked_manual< + 'w, + 's, + QF: Fetch<'w, State = Q::State>, + EntityList: IntoIterator, + >( + &'s self, + entities: EntityList, + world: &'w World, + last_change_tick: u32, + change_tick: u32, + ) -> QueryManyIter<'w, 's, Q, QF, F, EntityList::IntoIter> + where + EntityList::Item: Borrow, + { + QueryManyIter::new(world, self, entities, last_change_tick, change_tick) + } + /// Returns an [`Iterator`] over all possible combinations of `K` query results for the /// given [`World`] without repetition. /// This can only be called for read-only queries. @@ -685,15 +753,18 @@ impl QueryState { ); } - /// Runs `func` on each query result in parallel using the given `task_pool`. + /// Runs `func` on each query result in parallel. /// /// This can only be called for read-only queries, see [`Self::par_for_each_mut`] for /// write-queries. + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. 
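The same entity-driven lookup is available straight on `QueryState` when a `World` is at hand. A minimal sketch, assuming a hypothetical component `A`:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A(usize);

fn sum_selected(world: &mut World, entities: &[Entity]) -> usize {
    let mut state = world.query::<&A>();
    // Dead or non-matching entity ids are skipped rather than erroring.
    state
        .iter_many(world, entities.iter().copied())
        .map(|a| a.0)
        .sum()
}
```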
#[inline] pub fn par_for_each<'w, FN: Fn(ROQueryItem<'w, Q>) + Send + Sync + Clone>( &mut self, world: &'w World, - task_pool: &TaskPool, batch_size: usize, func: FN, ) { @@ -702,7 +773,6 @@ impl QueryState { self.update_archetypes(world); self.par_for_each_unchecked_manual::, FN>( world, - task_pool, batch_size, func, world.last_change_tick(), @@ -711,12 +781,15 @@ impl QueryState { } } - /// Runs `func` on each query result in parallel using the given `task_pool`. + /// Runs `func` on each query result in parallel. + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. #[inline] pub fn par_for_each_mut<'w, FN: Fn(QueryItem<'w, Q>) + Send + Sync + Clone>( &mut self, world: &'w mut World, - task_pool: &TaskPool, batch_size: usize, func: FN, ) { @@ -725,7 +798,6 @@ impl QueryState { self.update_archetypes(world); self.par_for_each_unchecked_manual::, FN>( world, - task_pool, batch_size, func, world.last_change_tick(), @@ -734,10 +806,14 @@ impl QueryState { } } - /// Runs `func` on each query result in parallel using the given `task_pool`. + /// Runs `func` on each query result in parallel. /// /// This can only be called for read-only queries. /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// /// # Safety /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries @@ -746,14 +822,12 @@ impl QueryState { pub unsafe fn par_for_each_unchecked<'w, FN: Fn(QueryItem<'w, Q>) + Send + Sync + Clone>( &mut self, world: &'w World, - task_pool: &TaskPool, batch_size: usize, func: FN, ) { self.update_archetypes(world); self.par_for_each_unchecked_manual::, FN>( world, - task_pool, batch_size, func, world.last_change_tick(), @@ -761,6 +835,29 @@ impl QueryState { ); } + /// Runs `func` on each query result where the entities match. + #[inline] + pub fn many_for_each_mut( + &mut self, + world: &mut World, + entities: EntityList, + func: impl FnMut(QueryItem<'_, Q>), + ) where + EntityList::Item: Borrow, + { + // SAFETY: query has unique world access + unsafe { + self.update_archetypes(world); + self.many_for_each_unchecked_manual( + world, + entities, + func, + world.last_change_tick(), + world.read_change_tick(), + ); + }; + } + /// Runs `func` on each query result for the given [`World`], where the last change and /// the current change tick are given. This is faster than the equivalent /// iter() method, but cannot be chained like a normal [`Iterator`]. @@ -783,7 +880,7 @@ impl QueryState { change_tick: u32, ) { // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual + // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::many_for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual let mut fetch = QF::init(world, &self.fetch_state, last_change_tick, change_tick); let mut filter = as Fetch>::init( world, @@ -829,6 +926,10 @@ impl QueryState { /// the current change tick are given. This is faster than the equivalent /// iter() method, but cannot be chained like a normal [`Iterator`]. /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. 
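`many_for_each_mut` is the mutable counterpart of `iter_many`: it borrows the `World` mutably and drives the closure itself, so no two mutable items are handed out at once. A minimal sketch, assuming a hypothetical component `Health`:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Health(i32);

fn damage(world: &mut World, targets: &[Entity]) {
    let mut state = world.query::<&mut Health>();
    // Entity ids that are dead or don't match the query are skipped.
    state.many_for_each_mut(world, targets.iter().copied(), |mut health| {
        health.0 -= 10;
    });
}
```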
If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// /// # Safety /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries @@ -842,15 +943,14 @@ impl QueryState { >( &self, world: &'w World, - task_pool: &TaskPool, batch_size: usize, func: FN, last_change_tick: u32, change_tick: u32, ) { // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual - task_pool.scope(|scope| { + // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::many_for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual + ComputeTaskPool::get().scope(|scope| { if QF::IS_DENSE && >::IS_DENSE { let tables = &world.storages().tables; for table_id in &self.matched_table_ids { @@ -940,6 +1040,192 @@ impl QueryState { } }); } + + /// Runs `func` on each query result for the given [`World`] and list of [`Entity`]'s, where the last change and + /// the current change tick are given. This is faster than the equivalent + /// iter() method, but cannot be chained like a normal [`Iterator`]. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + pub(crate) unsafe fn many_for_each_unchecked_manual( + &self, + world: &World, + entity_list: EntityList, + mut func: impl FnMut(QueryItem<'_, Q>), + last_change_tick: u32, + change_tick: u32, + ) where + EntityList::Item: Borrow, + { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryState::for_each_unchecked_manual, QueryState::many_for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual + let mut fetch = + as Fetch>::init(world, &self.fetch_state, last_change_tick, change_tick); + let mut filter = as Fetch>::init( + world, + &self.filter_state, + last_change_tick, + change_tick, + ); + + let tables = &world.storages.tables; + + for entity in entity_list.into_iter() { + let location = match world.entities.get(*entity.borrow()) { + Some(location) => location, + None => continue, + }; + + if !self + .matched_archetypes + .contains(location.archetype_id.index()) + { + continue; + } + + let archetype = &world.archetypes[location.archetype_id]; + + fetch.set_archetype(&self.fetch_state, archetype, tables); + filter.set_archetype(&self.filter_state, archetype, tables); + if filter.archetype_filter_fetch(location.index) { + func(fetch.archetype_fetch(location.index)); + } + } + } + + /// Returns a single immutable query result when there is exactly one entity matching + /// the query. + /// + /// This can only be called for read-only queries, + /// see [`single_mut`](Self::single_mut) for write-queries. + /// + /// # Panics + /// + /// Panics if the number of query results is not exactly one. Use + /// [`get_single`](Self::get_single) to return a `Result` instead of panicking. 
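With the `TaskPool` parameter gone, callers of the `par_for_each` family only pass a batch size; the pool is looked up via `ComputeTaskPool::get()`, which is why these methods now document a panic when the pool was never initialized (the scheduler normally does this, as the `executor_parallel` hunk later in this patch shows). A minimal sketch for manual use, assuming a hypothetical component `A`:

```rust
use bevy_ecs::prelude::*;
use bevy_tasks::{ComputeTaskPool, TaskPool};

#[derive(Component)]
struct A(usize);

fn visit_in_parallel(world: &mut World) {
    // Outside of an App/scheduler the pool must be initialized by hand.
    ComputeTaskPool::init(TaskPool::default);

    let mut state = world.query::<&A>();
    state.par_for_each(world, 32, |a| {
        let _ = a.0;
    });
}
```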
+ #[track_caller] + #[inline] + pub fn single<'w>(&mut self, world: &'w World) -> ROQueryItem<'w, Q> { + self.get_single(world).unwrap() + } + + /// Returns a single immutable query result when there is exactly one entity matching + /// the query. + /// + /// This can only be called for read-only queries, + /// see [`get_single_mut`](Self::get_single_mut) for write-queries. + /// + /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned + /// instead. + #[inline] + pub fn get_single<'w>( + &mut self, + world: &'w World, + ) -> Result, QuerySingleError> { + self.update_archetypes(world); + + // SAFETY: query is read only + unsafe { + self.get_single_unchecked_manual::>( + world, + world.last_change_tick(), + world.read_change_tick(), + ) + } + } + + /// Returns a single mutable query result when there is exactly one entity matching + /// the query. + /// + /// # Panics + /// + /// Panics if the number of query results is not exactly one. Use + /// [`get_single_mut`](Self::get_single_mut) to return a `Result` instead of panicking. + #[track_caller] + #[inline] + pub fn single_mut<'w>(&mut self, world: &'w mut World) -> QueryItem<'w, Q> { + // SAFETY: query has unique world access + self.get_single_mut(world).unwrap() + } + + /// Returns a single mutable query result when there is exactly one entity matching + /// the query. + /// + /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned + /// instead. + #[inline] + pub fn get_single_mut<'w>( + &mut self, + world: &'w mut World, + ) -> Result, QuerySingleError> { + self.update_archetypes(world); + + // SAFETY: query has unique world access + unsafe { + self.get_single_unchecked_manual::>( + world, + world.last_change_tick(), + world.read_change_tick(), + ) + } + } + + /// Returns a query result when there is exactly one entity matching the query. + /// + /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned + /// instead. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + #[inline] + pub unsafe fn get_single_unchecked<'w>( + &mut self, + world: &'w World, + ) -> Result, QuerySingleError> { + self.update_archetypes(world); + + self.get_single_unchecked_manual::>( + world, + world.last_change_tick(), + world.read_change_tick(), + ) + } + + /// Returns a query result when there is exactly one entity matching the query, + /// where the last change and the current change tick are given. + /// + /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned + /// instead. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + #[inline] + pub unsafe fn get_single_unchecked_manual<'w, QF: Fetch<'w, State = Q::State>>( + &self, + world: &'w World, + last_change_tick: u32, + change_tick: u32, + ) -> Result { + let mut query = self.iter_unchecked_manual::(world, last_change_tick, change_tick); + let first = query.next(); + let extra = query.next().is_some(); + + match (first, extra) { + (Some(r), false) => Ok(r), + (None, _) => Err(QuerySingleError::NoEntities(std::any::type_name::())), + (Some(_), _) => Err(QuerySingleError::MultipleEntities(std::any::type_name::< + Self, + >())), + } + } } /// An error that occurs when retrieving a specific [`Entity`]'s query result. 
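The single-result accessors make the zero/one/many cases explicit instead of leaving them to the caller. A minimal sketch, assuming a hypothetical `Player` component and the usual `pub use state::*;` re-export under `bevy_ecs::query`:

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::query::QuerySingleError;

#[derive(Component)]
struct Player {
    score: u32,
}

fn player_score(world: &mut World) -> Option<u32> {
    let mut state = world.query::<&Player>();
    match state.get_single(world) {
        Ok(player) => Some(player.score),
        // Zero or multiple matches are reported instead of panicking.
        Err(QuerySingleError::NoEntities(_) | QuerySingleError::MultipleEntities(_)) => None,
    }
}
```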
@@ -1070,3 +1356,24 @@ mod tests { let _panics = query_state.get_many_mut(&mut world_2, []); } } + +/// An error that occurs when evaluating a [`QueryState`] as a single expected resulted via +/// [`QueryState::single`] or [`QueryState::single_mut`]. +#[derive(Debug)] +pub enum QuerySingleError { + NoEntities(&'static str), + MultipleEntities(&'static str), +} + +impl std::error::Error for QuerySingleError {} + +impl std::fmt::Display for QuerySingleError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + QuerySingleError::NoEntities(query) => write!(f, "No entities fit the query {}", query), + QuerySingleError::MultipleEntities(query) => { + write!(f, "Multiple entities fit the query {}!", query) + } + } + } +} diff --git a/crates/bevy_ecs/src/reflect.rs b/crates/bevy_ecs/src/reflect.rs index 4881dd83f0edd..bcd41971c7c9e 100644 --- a/crates/bevy_ecs/src/reflect.rs +++ b/crates/bevy_ecs/src/reflect.rs @@ -8,6 +8,7 @@ use crate::{ }; use bevy_reflect::{ impl_from_reflect_value, impl_reflect_value, FromType, Reflect, ReflectDeserialize, + ReflectSerialize, }; #[derive(Clone)] diff --git a/crates/bevy_ecs/src/schedule/executor_parallel.rs b/crates/bevy_ecs/src/schedule/executor_parallel.rs index 149d8d02bc2df..c82924b0e276c 100644 --- a/crates/bevy_ecs/src/schedule/executor_parallel.rs +++ b/crates/bevy_ecs/src/schedule/executor_parallel.rs @@ -123,10 +123,7 @@ impl ParallelSystemExecutor for ParallelExecutor { } } - let compute_pool = world - .get_resource_or_insert_with(|| ComputeTaskPool(TaskPool::default())) - .clone(); - compute_pool.scope(|scope| { + ComputeTaskPool::init(TaskPool::default).scope(|scope| { self.prepare_systems(scope, systems, world); let parallel_executor = async { // All systems have been ran if there are no queued or running systems. diff --git a/crates/bevy_ecs/src/schedule/stage.rs b/crates/bevy_ecs/src/schedule/stage.rs index 210ac514c2085..ff26ced892a6f 100644 --- a/crates/bevy_ecs/src/schedule/stage.rs +++ b/crates/bevy_ecs/src/schedule/stage.rs @@ -47,6 +47,7 @@ impl_downcast!(Stage); /// /// The checker may report a system more times than the amount of constraints it would actually need /// to have unambiguous order with regards to a group of already-constrained systems. +#[derive(Default)] pub struct ReportExecutionOrderAmbiguities; /// Stores and executes systems. 
Execution order is not defined unless explicitly specified; @@ -925,10 +926,9 @@ impl Stage for SystemStage { } } match criteria.should_run { - ShouldRun::Yes => { - run_system_loop = true; - } - ShouldRun::YesAndCheckAgain | ShouldRun::NoAndCheckAgain => { + ShouldRun::Yes + | ShouldRun::YesAndCheckAgain + | ShouldRun::NoAndCheckAgain => { run_system_loop = true; } ShouldRun::No => (), diff --git a/crates/bevy_ecs/src/schedule/state.rs b/crates/bevy_ecs/src/schedule/state.rs index 3c9ddb37522ba..1693f0adce7e3 100644 --- a/crates/bevy_ecs/src/schedule/state.rs +++ b/crates/bevy_ecs/src/schedule/state.rs @@ -133,13 +133,8 @@ where let pred_clone = pred.clone(); (move |state: Res>, mut is_in_stack: Local| match &state.transition { Some(StateTransition::Entering(ref relevant, _)) - | Some(StateTransition::ExitingToResume(_, ref relevant)) => { - if relevant == &pred { - *is_in_stack = !*is_in_stack; - } - false - } - Some(StateTransition::ExitingFull(_, ref relevant)) => { + | Some(StateTransition::ExitingToResume(_, ref relevant)) + | Some(StateTransition::ExitingFull(_, ref relevant)) => { if relevant == &pred { *is_in_stack = !*is_in_stack; } @@ -267,14 +262,14 @@ where } /// Schedule a state change that replaces the active state with the given state. - /// This will fail if there is a scheduled operation, or if the given `state` matches the - /// current state + /// This will fail if there is a scheduled operation, pending transition, or if the given + /// `state` matches the current state pub fn set(&mut self, state: T) -> Result<(), StateError> { if self.stack.last().unwrap() == &state { return Err(StateError::AlreadyInState); } - if self.scheduled.is_some() { + if self.scheduled.is_some() || self.transition.is_some() { return Err(StateError::StateAlreadyQueued); } @@ -294,14 +289,14 @@ where } /// Schedule a state change that replaces the full stack with the given state. - /// This will fail if there is a scheduled operation, or if the given `state` matches the - /// current state + /// This will fail if there is a scheduled operation, pending transition, or if the given + /// `state` matches the current state pub fn replace(&mut self, state: T) -> Result<(), StateError> { if self.stack.last().unwrap() == &state { return Err(StateError::AlreadyInState); } - if self.scheduled.is_some() { + if self.scheduled.is_some() || self.transition.is_some() { return Err(StateError::StateAlreadyQueued); } @@ -326,7 +321,7 @@ where return Err(StateError::AlreadyInState); } - if self.scheduled.is_some() { + if self.scheduled.is_some() || self.transition.is_some() { return Err(StateError::StateAlreadyQueued); } @@ -347,7 +342,7 @@ where /// Same as [`Self::set`], but does a pop operation instead of a set operation pub fn pop(&mut self) -> Result<(), StateError> { - if self.scheduled.is_some() { + if self.scheduled.is_some() || self.transition.is_some() { return Err(StateError::StateAlreadyQueued); } @@ -370,9 +365,9 @@ where } /// Schedule a state change that restarts the active state. 
- /// This will fail if there is a scheduled operation + /// This will fail if there is a scheduled operation or a pending transition pub fn restart(&mut self) -> Result<(), StateError> { - if self.scheduled.is_some() { + if self.scheduled.is_some() || self.transition.is_some() { return Err(StateError::StateAlreadyQueued); } diff --git a/crates/bevy_ecs/src/storage/blob_vec.rs b/crates/bevy_ecs/src/storage/blob_vec.rs index a56e71023eec4..8ce77d73a64a7 100644 --- a/crates/bevy_ecs/src/storage/blob_vec.rs +++ b/crates/bevy_ecs/src/storage/blob_vec.rs @@ -9,7 +9,7 @@ use bevy_ptr::{OwningPtr, Ptr, PtrMut}; /// A flat, type-erased data storage type /// /// Used to densely store homogeneous ECS data. -pub struct BlobVec { +pub(super) struct BlobVec { item_layout: Layout, capacity: usize, /// Number of elements, not bytes @@ -86,6 +86,11 @@ impl BlobVec { self.capacity } + #[inline] + pub fn layout(&self) -> Layout { + self.item_layout + } + pub fn reserve_exact(&mut self, additional: usize) { let available_space = self.capacity - self.len; if available_space < additional { @@ -352,7 +357,7 @@ mod tests { // SAFETY: The pointer points to a valid value of type `T` and it is safe to drop this value. unsafe fn drop_ptr(x: OwningPtr<'_>) { - x.drop_as::() + x.drop_as::(); } /// # Safety diff --git a/crates/bevy_ecs/src/storage/mod.rs b/crates/bevy_ecs/src/storage/mod.rs index f54a2275bfe07..571f05184cdbd 100644 --- a/crates/bevy_ecs/src/storage/mod.rs +++ b/crates/bevy_ecs/src/storage/mod.rs @@ -4,7 +4,6 @@ mod blob_vec; mod sparse_set; mod table; -pub use blob_vec::*; pub use sparse_set::*; pub use table::*; diff --git a/crates/bevy_ecs/src/storage/sparse_set.rs b/crates/bevy_ecs/src/storage/sparse_set.rs index e21a4f4723c14..1d5172b64a624 100644 --- a/crates/bevy_ecs/src/storage/sparse_set.rs +++ b/crates/bevy_ecs/src/storage/sparse_set.rs @@ -1,13 +1,15 @@ use crate::{ component::{ComponentId, ComponentInfo, ComponentTicks}, entity::Entity, - storage::BlobVec, + storage::Column, }; use bevy_ptr::{OwningPtr, Ptr}; use std::{cell::UnsafeCell, hash::Hash, marker::PhantomData}; +type EntityId = u32; + #[derive(Debug)] -pub struct SparseArray { +pub(crate) struct SparseArray { values: Vec>, marker: PhantomData, } @@ -29,13 +31,6 @@ impl SparseArray { } impl SparseArray { - pub fn with_capacity(capacity: usize) -> Self { - Self { - values: Vec::with_capacity(capacity), - marker: PhantomData, - } - } - #[inline] pub fn insert(&mut self, index: I, value: V) { let index = index.sparse_set_index(); @@ -72,18 +67,6 @@ impl SparseArray { self.values.get_mut(index).and_then(|value| value.take()) } - #[inline] - pub fn get_or_insert_with(&mut self, index: I, func: impl FnOnce() -> V) -> &mut V { - let index = index.sparse_set_index(); - if index < self.values.len() { - return self.values[index].get_or_insert_with(func); - } - self.values.resize_with(index + 1, || None); - let value = &mut self.values[index]; - *value = Some(func()); - value.as_mut().unwrap() - } - pub fn clear(&mut self) { self.values.clear(); } @@ -94,28 +77,28 @@ impl SparseArray { /// Designed for relatively fast insertions and deletions. #[derive(Debug)] pub struct ComponentSparseSet { - dense: BlobVec, - ticks: Vec>, + dense: Column, + // Internally this only relies on the Entity ID to keep track of where the component data is + // stored for entities that are alive. The generation is not required, but is stored + // in debug builds to validate that access is correct. 
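The `State` changes in this hunk mean the stack operations (`set`, `replace`, `pop`, `restart`, and related operations) now also bail out while a previous change is still mid-transition, not only while one is scheduled, so callers should expect `StateError::StateAlreadyQueued` somewhat more often. A minimal sketch, assuming a hypothetical `AppState`:

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::schedule::StateError;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum AppState {
    Menu,
    InGame,
}

fn start_game(mut state: ResMut<State<AppState>>) {
    match state.set(AppState::InGame) {
        Ok(()) => {}
        // Already in `InGame`, or a change is scheduled / still transitioning.
        Err(StateError::AlreadyInState | StateError::StateAlreadyQueued) => {}
        Err(_) => {}
    }
}
```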
+ #[cfg(not(debug_assertions))] + entities: Vec, + #[cfg(debug_assertions)] entities: Vec, - sparse: SparseArray, + sparse: SparseArray, } impl ComponentSparseSet { - pub fn new(component_info: &ComponentInfo, capacity: usize) -> Self { + pub(crate) fn new(component_info: &ComponentInfo, capacity: usize) -> Self { Self { - // SAFE: component_info.drop() is compatible with the items that will be inserted. - dense: unsafe { - BlobVec::new(component_info.layout(), component_info.drop(), capacity) - }, - ticks: Vec::with_capacity(capacity), + dense: Column::with_capacity(component_info, capacity), entities: Vec::with_capacity(capacity), sparse: Default::default(), } } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.dense.clear(); - self.ticks.clear(); self.entities.clear(); self.sparse.clear(); } @@ -136,83 +119,114 @@ impl ComponentSparseSet { /// # Safety /// The `value` pointer must point to a valid address that matches the [`Layout`](std::alloc::Layout) /// inside the [`ComponentInfo`] given when constructing this sparse set. - pub unsafe fn insert(&mut self, entity: Entity, value: OwningPtr<'_>, change_tick: u32) { - if let Some(&dense_index) = self.sparse.get(entity) { - self.dense.replace_unchecked(dense_index, value); - *self.ticks.get_unchecked_mut(dense_index) = - UnsafeCell::new(ComponentTicks::new(change_tick)); + pub(crate) unsafe fn insert(&mut self, entity: Entity, value: OwningPtr<'_>, change_tick: u32) { + if let Some(&dense_index) = self.sparse.get(entity.id()) { + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index as usize]); + self.dense.replace(dense_index as usize, value, change_tick); } else { let dense_index = self.dense.len(); - self.dense.push(value); - self.sparse.insert(entity, dense_index); - debug_assert_eq!(self.ticks.len(), dense_index); - debug_assert_eq!(self.entities.len(), dense_index); - self.ticks - .push(UnsafeCell::new(ComponentTicks::new(change_tick))); + self.dense.push(value, ComponentTicks::new(change_tick)); + self.sparse.insert(entity.id(), dense_index as u32); + #[cfg(debug_assertions)] + assert_eq!(self.entities.len(), dense_index); + #[cfg(not(debug_assertions))] + self.entities.push(entity.id()); + #[cfg(debug_assertions)] self.entities.push(entity); } } #[inline] pub fn contains(&self, entity: Entity) -> bool { - self.sparse.contains(entity) + #[cfg(debug_assertions)] + { + if let Some(&dense_index) = self.sparse.get(entity.id()) { + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index as usize]); + true + } else { + false + } + } + #[cfg(not(debug_assertions))] + self.sparse.contains(entity.id()) } #[inline] pub fn get(&self, entity: Entity) -> Option> { - self.sparse.get(entity).map(|dense_index| { + self.sparse.get(entity.id()).map(|dense_index| { + let dense_index = *dense_index as usize; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index]); // SAFE: if the sparse index points to something in the dense vec, it exists - unsafe { self.dense.get_unchecked(*dense_index) } + unsafe { self.dense.get_data_unchecked(dense_index) } }) } #[inline] pub fn get_with_ticks(&self, entity: Entity) -> Option<(Ptr<'_>, &UnsafeCell)> { - let dense_index = *self.sparse.get(entity)?; + let dense_index = *self.sparse.get(entity.id())? 
as usize; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index]); // SAFE: if the sparse index points to something in the dense vec, it exists unsafe { Some(( - self.dense.get_unchecked(dense_index), - self.ticks.get_unchecked(dense_index), + self.dense.get_data_unchecked(dense_index), + self.dense.get_ticks_unchecked(dense_index), )) } } #[inline] pub fn get_ticks(&self, entity: Entity) -> Option<&UnsafeCell> { - let dense_index = *self.sparse.get(entity)?; + let dense_index = *self.sparse.get(entity.id())? as usize; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index]); // SAFE: if the sparse index points to something in the dense vec, it exists - unsafe { Some(self.ticks.get_unchecked(dense_index)) } + unsafe { Some(self.dense.get_ticks_unchecked(dense_index)) } } /// Removes the `entity` from this sparse set and returns a pointer to the associated value (if /// it exists). #[must_use = "The returned pointer must be used to drop the removed component."] - pub fn remove_and_forget(&mut self, entity: Entity) -> Option> { - self.sparse.remove(entity).map(|dense_index| { - self.ticks.swap_remove(dense_index); + pub(crate) fn remove_and_forget(&mut self, entity: Entity) -> Option> { + self.sparse.remove(entity.id()).map(|dense_index| { + let dense_index = dense_index as usize; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index]); self.entities.swap_remove(dense_index); let is_last = dense_index == self.dense.len() - 1; // SAFE: dense_index was just removed from `sparse`, which ensures that it is valid - let value = unsafe { self.dense.swap_remove_and_forget_unchecked(dense_index) }; + let (value, _) = unsafe { self.dense.swap_remove_and_forget_unchecked(dense_index) }; if !is_last { let swapped_entity = self.entities[dense_index]; - *self.sparse.get_mut(swapped_entity).unwrap() = dense_index; + #[cfg(not(debug_assertions))] + let idx = swapped_entity; + #[cfg(debug_assertions)] + let idx = swapped_entity.id(); + *self.sparse.get_mut(idx).unwrap() = dense_index as u32; } value }) } - pub fn remove(&mut self, entity: Entity) -> bool { - if let Some(dense_index) = self.sparse.remove(entity) { - self.ticks.swap_remove(dense_index); + pub(crate) fn remove(&mut self, entity: Entity) -> bool { + if let Some(dense_index) = self.sparse.remove(entity.id()) { + let dense_index = dense_index as usize; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index]); self.entities.swap_remove(dense_index); let is_last = dense_index == self.dense.len() - 1; // SAFE: if the sparse index points to something in the dense vec, it exists - unsafe { self.dense.swap_remove_and_drop_unchecked(dense_index) } + unsafe { self.dense.swap_remove_unchecked(dense_index) } if !is_last { let swapped_entity = self.entities[dense_index]; - *self.sparse.get_mut(swapped_entity).unwrap() = dense_index; + #[cfg(not(debug_assertions))] + let idx = swapped_entity; + #[cfg(debug_assertions)] + let idx = swapped_entity.id(); + *self.sparse.get_mut(idx).unwrap() = dense_index as u32; } true } else { @@ -221,9 +235,7 @@ impl ComponentSparseSet { } pub(crate) fn check_change_ticks(&mut self, change_tick: u32) { - for component_ticks in &mut self.ticks { - component_ticks.get_mut().check_ticks(change_tick); - } + self.dense.check_change_ticks(change_tick); } } @@ -277,28 +289,6 @@ impl SparseSet { self.indices.push(index); self.dense.push(value); } - - // PERF: switch to this. 
it's faster but it has an invalid memory access on - // table_add_remove_many let dense = &mut self.dense; - // let indices = &mut self.indices; - // let dense_index = *self.sparse.get_or_insert_with(index.clone(), move || { - // if dense.len() == dense.capacity() { - // dense.reserve(64); - // indices.reserve(64); - // } - // let len = dense.len(); - // // SAFE: we set the index immediately - // unsafe { - // dense.set_len(len + 1); - // indices.set_len(len + 1); - // } - // len - // }); - // // SAFE: index either already existed or was just allocated - // unsafe { - // *self.dense.get_unchecked_mut(dense_index) = value; - // *self.indices.get_unchecked_mut(dense_index) = index; - // } } pub fn get_or_insert_with(&mut self, index: I, func: impl FnOnce() -> V) -> &mut V { @@ -370,6 +360,14 @@ impl SparseSet { pub fn values_mut(&mut self) -> impl Iterator { self.dense.iter_mut() } + + pub fn iter(&self) -> impl Iterator { + self.indices.iter().zip(self.dense.iter()) + } + + pub fn iter_mut(&mut self) -> impl Iterator { + self.indices.iter().zip(self.dense.iter_mut()) + } } pub trait SparseSetIndex: Clone + PartialEq + Eq + Hash { diff --git a/crates/bevy_ecs/src/storage/table.rs b/crates/bevy_ecs/src/storage/table.rs index 755644998decf..37ee9498656c0 100644 --- a/crates/bevy_ecs/src/storage/table.rs +++ b/crates/bevy_ecs/src/storage/table.rs @@ -1,10 +1,11 @@ use crate::{ component::{ComponentId, ComponentInfo, ComponentTicks, Components}, entity::Entity, - storage::{BlobVec, SparseSet}, + storage::{blob_vec::BlobVec, SparseSet}, }; use bevy_ptr::{OwningPtr, Ptr, PtrMut}; use bevy_utils::HashMap; +use std::alloc::Layout; use std::{ cell::UnsafeCell, ops::{Index, IndexMut}, @@ -30,23 +31,27 @@ impl TableId { } } +#[derive(Debug)] pub struct Column { - pub(crate) component_id: ComponentId, - pub(crate) data: BlobVec, - pub(crate) ticks: Vec>, + data: BlobVec, + ticks: Vec>, } impl Column { #[inline] - pub fn with_capacity(component_info: &ComponentInfo, capacity: usize) -> Self { + pub(crate) fn with_capacity(component_info: &ComponentInfo, capacity: usize) -> Self { Column { - component_id: component_info.id(), // SAFE: component_info.drop() is valid for the types that will be inserted. data: unsafe { BlobVec::new(component_info.layout(), component_info.drop(), capacity) }, ticks: Vec::with_capacity(capacity), } } + #[inline] + pub fn item_layout(&self) -> Layout { + self.data.layout() + } + /// Writes component data to the column at given row. /// Assumes the slot is uninitialized, drop is not called. /// To overwrite existing initialized value, use `replace` instead. @@ -54,7 +59,12 @@ impl Column { /// # Safety /// Assumes data has already been allocated for the given row. #[inline] - pub unsafe fn initialize(&mut self, row: usize, data: OwningPtr<'_>, ticks: ComponentTicks) { + pub(crate) unsafe fn initialize( + &mut self, + row: usize, + data: OwningPtr<'_>, + ticks: ComponentTicks, + ) { debug_assert!(row < self.len()); self.data.initialize_unchecked(row, data); *self.ticks.get_unchecked_mut(row).get_mut() = ticks; @@ -66,7 +76,7 @@ impl Column { /// # Safety /// Assumes data has already been allocated for the given row. 
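The `SparseSet::iter`/`iter_mut` methods added earlier in this hunk pair each stored index with its value; that is what lets the `Table::move_to_*` methods below recover the `ComponentId` for a `Column` now that the column no longer stores its own id. A minimal sketch of the shape of the API, assuming a plain value type:

```rust
use bevy_ecs::component::ComponentId;
use bevy_ecs::storage::SparseSet;

fn total_len(columns: &SparseSet<ComponentId, Vec<u8>>) -> usize {
    // Each item is a `(&ComponentId, &Vec<u8>)` pair.
    columns.iter().map(|(_id, data)| data.len()).sum()
}
```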
#[inline] - pub unsafe fn replace(&mut self, row: usize, data: OwningPtr<'_>, change_tick: u32) { + pub(crate) unsafe fn replace(&mut self, row: usize, data: OwningPtr<'_>, change_tick: u32) { debug_assert!(row < self.len()); self.data.replace_unchecked(row, data); self.ticks @@ -75,14 +85,6 @@ impl Column { .set_changed(change_tick); } - /// # Safety - /// Assumes data has already been allocated for the given row. - #[inline] - pub unsafe fn initialize_data(&mut self, row: usize, data: OwningPtr<'_>) { - debug_assert!(row < self.len()); - self.data.initialize_unchecked(row, data); - } - #[inline] pub fn len(&self) -> usize { self.data.len() @@ -96,7 +98,7 @@ impl Column { /// # Safety /// index must be in-bounds #[inline] - pub unsafe fn get_ticks_unchecked_mut(&mut self, row: usize) -> &mut ComponentTicks { + pub(crate) unsafe fn get_ticks_unchecked_mut(&mut self, row: usize) -> &mut ComponentTicks { debug_assert!(row < self.len()); self.ticks.get_unchecked_mut(row).get_mut() } @@ -162,7 +164,7 @@ impl Column { /// - index must be in-bounds /// - no other reference to the data of the same row can exist at the same time #[inline] - pub unsafe fn get_data_unchecked_mut(&mut self, row: usize) -> PtrMut<'_> { + pub(crate) unsafe fn get_data_unchecked_mut(&mut self, row: usize) -> PtrMut<'_> { debug_assert!(row < self.data.len()); self.data.get_unchecked_mut(row) } @@ -194,14 +196,7 @@ pub struct Table { } impl Table { - pub const fn new() -> Table { - Self { - columns: SparseSet::new(), - entities: Vec::new(), - } - } - - pub fn with_capacity(capacity: usize, column_capacity: usize) -> Table { + pub(crate) fn with_capacity(capacity: usize, column_capacity: usize) -> Table { Self { columns: SparseSet::with_capacity(column_capacity), entities: Vec::with_capacity(capacity), @@ -213,7 +208,7 @@ impl Table { &self.entities } - pub fn add_column(&mut self, component_info: &ComponentInfo) { + pub(crate) fn add_column(&mut self, component_info: &ComponentInfo) { self.columns.insert( component_info.id(), Column::with_capacity(component_info, self.entities.capacity()), @@ -225,7 +220,7 @@ impl Table { /// /// # Safety /// `row` must be in-bounds - pub unsafe fn swap_remove_unchecked(&mut self, row: usize) -> Option { + pub(crate) unsafe fn swap_remove_unchecked(&mut self, row: usize) -> Option { for column in self.columns.values_mut() { column.swap_remove_unchecked(row); } @@ -245,7 +240,7 @@ impl Table { /// /// # Safety /// Row must be in-bounds - pub unsafe fn move_to_and_forget_missing_unchecked( + pub(crate) unsafe fn move_to_and_forget_missing_unchecked( &mut self, row: usize, new_table: &mut Table, @@ -253,10 +248,9 @@ impl Table { debug_assert!(row < self.len()); let is_last = row == self.entities.len() - 1; let new_row = new_table.allocate(self.entities.swap_remove(row)); - for column in self.columns.values_mut() { - let component_id = column.component_id; + for (component_id, column) in self.columns.iter_mut() { let (data, ticks) = column.swap_remove_and_forget_unchecked(row); - if let Some(new_column) = new_table.get_column_mut(component_id) { + if let Some(new_column) = new_table.get_column_mut(*component_id) { new_column.initialize(new_row, data, ticks); } } @@ -276,7 +270,7 @@ impl Table { /// /// # Safety /// row must be in-bounds - pub unsafe fn move_to_and_drop_missing_unchecked( + pub(crate) unsafe fn move_to_and_drop_missing_unchecked( &mut self, row: usize, new_table: &mut Table, @@ -284,8 +278,8 @@ impl Table { debug_assert!(row < self.len()); let is_last = row == 
self.entities.len() - 1; let new_row = new_table.allocate(self.entities.swap_remove(row)); - for column in self.columns.values_mut() { - if let Some(new_column) = new_table.get_column_mut(column.component_id) { + for (component_id, column) in self.columns.iter_mut() { + if let Some(new_column) = new_table.get_column_mut(*component_id) { let (data, ticks) = column.swap_remove_and_forget_unchecked(row); new_column.initialize(new_row, data, ticks); } else { @@ -308,7 +302,7 @@ impl Table { /// /// # Safety /// `row` must be in-bounds. `new_table` must contain every component this table has - pub unsafe fn move_to_superset_unchecked( + pub(crate) unsafe fn move_to_superset_unchecked( &mut self, row: usize, new_table: &mut Table, @@ -316,8 +310,8 @@ impl Table { debug_assert!(row < self.len()); let is_last = row == self.entities.len() - 1; let new_row = new_table.allocate(self.entities.swap_remove(row)); - for column in self.columns.values_mut() { - let new_column = new_table.get_column_mut(column.component_id).unwrap(); + for (component_id, column) in self.columns.iter_mut() { + let new_column = new_table.get_column_mut(*component_id).unwrap(); let (data, ticks) = column.swap_remove_and_forget_unchecked(row); new_column.initialize(new_row, data, ticks); } @@ -337,7 +331,7 @@ impl Table { } #[inline] - pub fn get_column_mut(&mut self, component_id: ComponentId) -> Option<&mut Column> { + pub(crate) fn get_column_mut(&mut self, component_id: ComponentId) -> Option<&mut Column> { self.columns.get_mut(component_id) } @@ -346,7 +340,7 @@ impl Table { self.columns.contains(component_id) } - pub fn reserve(&mut self, additional: usize) { + pub(crate) fn reserve(&mut self, additional: usize) { if self.entities.capacity() - self.entities.len() < additional { self.entities.reserve(additional); @@ -363,7 +357,7 @@ impl Table { /// /// # Safety /// the allocated row must be written to immediately with valid values in each column - pub unsafe fn allocate(&mut self, entity: Entity) -> usize { + pub(crate) unsafe fn allocate(&mut self, entity: Entity) -> usize { self.reserve(1); let index = self.entities.len(); self.entities.push(entity); @@ -399,7 +393,7 @@ impl Table { self.columns.values() } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.entities.clear(); for column in self.columns.values_mut() { column.clear(); @@ -425,7 +419,7 @@ impl Default for Tables { } } -pub struct TableMoveResult { +pub(crate) struct TableMoveResult { pub swapped_entity: Option, pub new_row: usize, } @@ -446,11 +440,6 @@ impl Tables { self.tables.get(id.index()) } - #[inline] - pub fn get_mut(&mut self, id: TableId) -> Option<&mut Table> { - self.tables.get_mut(id.index()) - } - #[inline] pub(crate) fn get_2_mut(&mut self, a: TableId, b: TableId) -> (&mut Table, &mut Table) { if a.index() > b.index() { @@ -464,7 +453,7 @@ impl Tables { /// # Safety /// `component_ids` must contain components that exist in `components` - pub unsafe fn get_id_or_insert( + pub(crate) unsafe fn get_id_or_insert( &mut self, component_ids: &[ComponentId], components: &Components, @@ -490,11 +479,7 @@ impl Tables { self.tables.iter() } - pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, Table> { - self.tables.iter_mut() - } - - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { for table in &mut self.tables { table.clear(); } @@ -529,7 +514,11 @@ mod tests { use crate::component::Component; use crate::ptr::OwningPtr; use crate::storage::Storages; - use crate::{component::Components, entity::Entity, storage::Table}; + 
use crate::{ + component::{ComponentTicks, Components}, + entity::Entity, + storage::Table, + }; #[derive(Component)] struct W(T); @@ -548,10 +537,11 @@ mod tests { let row = table.allocate(*entity); let value: W = W(row); OwningPtr::make(value, |value_ptr| { - table - .get_column_mut(component_id) - .unwrap() - .initialize_data(row, value_ptr); + table.get_column_mut(component_id).unwrap().initialize( + row, + value_ptr, + ComponentTicks::new(0), + ); }); }; } diff --git a/crates/bevy_ecs/src/system/commands/command_queue.rs b/crates/bevy_ecs/src/system/commands/command_queue.rs index 64a3bd7914a6f..d73b529253f66 100644 --- a/crates/bevy_ecs/src/system/commands/command_queue.rs +++ b/crates/bevy_ecs/src/system/commands/command_queue.rs @@ -1,21 +1,23 @@ +use std::mem::{ManuallyDrop, MaybeUninit}; + use super::Command; use crate::world::World; struct CommandMeta { offset: usize, - func: unsafe fn(value: *mut u8, world: &mut World), + func: unsafe fn(value: *mut MaybeUninit, world: &mut World), } /// A queue of [`Command`]s // -// NOTE: [`CommandQueue`] is implemented via a `Vec` over a `Vec>` +// NOTE: [`CommandQueue`] is implemented via a `Vec>` over a `Vec>` // as an optimization. Since commands are used frequently in systems as a way to spawn // entities/components/resources, and it's not currently possible to parallelize these // due to mutable [`World`] access, maximizing performance for [`CommandQueue`] is // preferred to simplicity of implementation. #[derive(Default)] pub struct CommandQueue { - bytes: Vec, + bytes: Vec>, metas: Vec, } @@ -35,7 +37,7 @@ impl CommandQueue { /// SAFE: This function is only every called when the `command` bytes is the associated /// [`Commands`] `T` type. Also this only reads the data via `read_unaligned` so unaligned /// accesses are safe. - unsafe fn write_command(command: *mut u8, world: &mut World) { + unsafe fn write_command(command: *mut MaybeUninit, world: &mut World) { let command = command.cast::().read_unaligned(); command.write(world); } @@ -48,25 +50,30 @@ impl CommandQueue { func: write_command::, }); + // Use `ManuallyDrop` to forget `command` right away, avoiding + // any use of it after the `ptr::copy_nonoverlapping`. + let command = ManuallyDrop::new(command); + if size > 0 { self.bytes.reserve(size); // SAFE: The internal `bytes` vector has enough storage for the - // command (see the call the `reserve` above), and the vector has - // its length set appropriately. - // Also `command` is forgotten at the end of this function so that - // when `apply` is called later, a double `drop` does not occur. + // command (see the call the `reserve` above), the vector has + // its length set appropriately and can contain any kind of bytes. + // In case we're writing a ZST and the `Vec` hasn't allocated yet + // then `as_mut_ptr` will be a dangling (non null) pointer, and + // thus valid for ZST writes. + // Also `command` is forgotten so that when `apply` is called + // later, a double `drop` does not occur. unsafe { std::ptr::copy_nonoverlapping( - &command as *const C as *const u8, + &*command as *const C as *const MaybeUninit, self.bytes.as_mut_ptr().add(old_len), size, ); self.bytes.set_len(old_len + size); } } - - std::mem::forget(command); } /// Execute the queued [`Command`]s in the world. @@ -81,27 +88,12 @@ impl CommandQueue { // unnecessary allocations. 
unsafe { self.bytes.set_len(0) }; - let byte_ptr = if self.bytes.as_mut_ptr().is_null() { - // SAFE: If the vector's buffer pointer is `null` this mean nothing has been pushed to its bytes. - // This means either that: - // - // 1) There are no commands so this pointer will never be read/written from/to. - // - // 2) There are only zero-sized commands pushed. - // According to https://doc.rust-lang.org/std/ptr/index.html - // "The canonical way to obtain a pointer that is valid for zero-sized accesses is NonNull::dangling" - // therefore it is safe to call `read_unaligned` on a pointer produced from `NonNull::dangling` for - // zero-sized commands. - unsafe { std::ptr::NonNull::dangling().as_mut() } - } else { - self.bytes.as_mut_ptr() - }; - for meta in self.metas.drain(..) { // SAFE: The implementation of `write_command` is safe for the according Command type. + // It's ok to read from `bytes.as_mut_ptr()` because we just wrote to it in `push`. // The bytes are safely cast to their original type, safely read, and then dropped. unsafe { - (meta.func)(byte_ptr.add(meta.offset), world); + (meta.func)(self.bytes.as_mut_ptr().add(meta.offset), world); } } } @@ -234,4 +226,17 @@ mod test { fn test_command_is_send() { assert_is_send(SpawnCommand); } + + struct CommandWithPadding(u8, u16); + impl Command for CommandWithPadding { + fn write(self, _: &mut World) {} + } + + #[cfg(miri)] + #[test] + fn test_uninit_bytes() { + let mut queue = CommandQueue::default(); + queue.push(CommandWithPadding(0, 0)); + let _ = format!("{:?}", queue.bytes); + } } diff --git a/crates/bevy_ecs/src/system/commands/mod.rs b/crates/bevy_ecs/src/system/commands/mod.rs index 564f1ab3ebe5f..bf40e13fb98b1 100644 --- a/crates/bevy_ecs/src/system/commands/mod.rs +++ b/crates/bevy_ecs/src/system/commands/mod.rs @@ -1,4 +1,5 @@ mod command_queue; +mod parallel_scope; use crate::{ bundle::Bundle, @@ -8,6 +9,7 @@ use crate::{ }; use bevy_utils::tracing::{error, warn}; pub use command_queue::CommandQueue; +pub use parallel_scope::*; use std::marker::PhantomData; use super::Resource; diff --git a/crates/bevy_ecs/src/system/commands/parallel_scope.rs b/crates/bevy_ecs/src/system/commands/parallel_scope.rs new file mode 100644 index 0000000000000..41dc9b7289192 --- /dev/null +++ b/crates/bevy_ecs/src/system/commands/parallel_scope.rs @@ -0,0 +1,98 @@ +use std::cell::Cell; + +use thread_local::ThreadLocal; + +use crate::{ + entity::Entities, + prelude::World, + system::{SystemParam, SystemParamFetch, SystemParamState}, +}; + +use super::{CommandQueue, Commands}; + +#[doc(hidden)] +#[derive(Default)] +/// The internal [`SystemParamState`] of the [`ParallelCommands`] type +pub struct ParallelCommandsState { + thread_local_storage: ThreadLocal<Cell<CommandQueue>>, +} + +/// An alternative to [`Commands`] that can be used in parallel contexts, such as those in [`Query::par_for_each`](crate::system::Query::par_for_each) +/// +/// Note: Because command application order will depend on how many threads are used, non-commutative commands may result in non-deterministic results. 
+/// +/// Example: +/// ``` +/// # use bevy_ecs::prelude::*; +/// # use bevy_tasks::ComputeTaskPool; +/// # +/// # #[derive(Component)] +/// # struct Velocity; +/// # impl Velocity { fn magnitude(&self) -> f32 { 42.0 } } +/// fn parallel_command_system( +/// mut query: Query<(Entity, &Velocity)>, +/// par_commands: ParallelCommands +/// ) { +/// query.par_for_each(32, |(entity, velocity)| { +/// if velocity.magnitude() > 10.0 { +/// par_commands.command_scope(|mut commands| { +/// commands.entity(entity).despawn(); +/// }); +/// } +/// }); +/// } +/// # bevy_ecs::system::assert_is_system(parallel_command_system); +///``` +pub struct ParallelCommands<'w, 's> { + state: &'s mut ParallelCommandsState, + entities: &'w Entities, +} + +impl SystemParam for ParallelCommands<'_, '_> { + type Fetch = ParallelCommandsState; +} + +impl<'w, 's> SystemParamFetch<'w, 's> for ParallelCommandsState { + type Item = ParallelCommands<'w, 's>; + + unsafe fn get_param( + state: &'s mut Self, + _: &crate::system::SystemMeta, + world: &'w World, + _: u32, + ) -> Self::Item { + ParallelCommands { + state, + entities: world.entities(), + } + } +} + +// SAFE: no component or resource access to report +unsafe impl SystemParamState for ParallelCommandsState { + fn init(_: &mut World, _: &mut crate::system::SystemMeta) -> Self { + Self::default() + } + + fn apply(&mut self, world: &mut World) { + for cq in self.thread_local_storage.iter_mut() { + cq.get_mut().apply(world); + } + } +} + +impl<'w, 's> ParallelCommands<'w, 's> { + pub fn command_scope(&self, f: impl FnOnce(Commands) -> R) -> R { + let store = &self.state.thread_local_storage; + let command_queue_cell = store.get_or_default(); + let mut command_queue = command_queue_cell.take(); + + let r = f(Commands::new_from_entities( + &mut command_queue, + self.entities, + )); + + command_queue_cell.set(command_queue); + r + } +} diff --git a/crates/bevy_ecs/src/system/function_system.rs b/crates/bevy_ecs/src/system/function_system.rs index 55ff0752f35e0..9dec1b5f84e3b 100644 --- a/crates/bevy_ecs/src/system/function_system.rs +++ b/crates/bevy_ecs/src/system/function_system.rs @@ -7,7 +7,7 @@ use crate::{ schedule::SystemLabel, system::{ check_system_change_tick, ReadOnlySystemParamFetch, System, SystemParam, SystemParamFetch, - SystemParamState, + SystemParamItem, SystemParamState, }, world::{World, WorldId}, }; @@ -319,7 +319,6 @@ where world_id: Option, archetype_generation: ArchetypeGeneration, // NOTE: PhantomData T> gives this safe Send/Sync impls - #[allow(clippy::type_complexity)] marker: PhantomData (In, Out, Marker)>, } @@ -346,6 +345,16 @@ where } } +impl FunctionSystem +where + Param: SystemParam, +{ + /// Message shown when a system isn't initialised + // When lines get too long, rustfmt can sometimes refuse to format them. + // Work around this by storing the message separately. + const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?"; +} + impl System for FunctionSystem where In: 'static, @@ -380,20 +389,25 @@ where #[inline] unsafe fn run_unsafe(&mut self, input: Self::In, world: &World) -> Self::Out { let change_tick = world.increment_change_tick(); - let out = self.func.run( - input, - self.param_state.as_mut().expect("System's param_state was not found. Did you forget to initialize this system before running it?"), + + // Safety: + // We update the archetype component access correctly based on `Param`'s requirements + // in `update_archetype_component_access`. 
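// A hedged sketch of the initialize-then-run contract that `PARAM_MESSAGE` guards:
// `System::initialize` must populate `param_state` before `run` can fetch the
// parameters and call the wrapped function. `Counter` and `bump` are illustrative
// names only, not part of this patch.
use bevy_ecs::prelude::*;

struct Counter(u32);

fn bump(mut counter: ResMut<Counter>) {
    counter.0 += 1;
}

fn main() {
    let mut world = World::new();
    world.insert_resource(Counter(0));

    let mut system = IntoSystem::into_system(bump);
    // Skipping this call would trigger the `PARAM_MESSAGE` panic when the system runs.
    system.initialize(&mut world);
    system.run((), &mut world);
    system.apply_buffers(&mut world);

    assert_eq!(world.resource::<Counter>().0, 1);
}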
+ // Our caller upholds the requirements. + let params = ::Fetch::get_param( + self.param_state.as_mut().expect(Self::PARAM_MESSAGE), &self.system_meta, world, change_tick, ); + let out = self.func.run(input, params); self.system_meta.last_change_tick = change_tick; out } #[inline] fn apply_buffers(&mut self, world: &mut World) { - let param_state = self.param_state.as_mut().expect("System's param_state was not found. Did you forget to initialize this system before running it?"); + let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE); param_state.apply(world); } @@ -474,19 +488,71 @@ impl SystemLabel for SystemTypeIdLabel { } /// A trait implemented for all functions that can be used as [`System`]s. +/// +/// This trait can be useful for making your own systems which accept other systems, +/// sometimes called higher order systems. +/// +/// This should be used in combination with [`ParamSet`] when calling other systems +/// within your system. +/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions. +/// +/// # Example +/// +/// To create something like [`ChainSystem`], but in entirely safe code. +/// +/// ```rust +/// use std::num::ParseIntError; +/// +/// use bevy_ecs::prelude::*; +/// use bevy_ecs::system::{SystemParam, SystemParamItem}; +/// +/// // Unfortunately, we need all of these generics. `A` is the first system, with its +/// // parameters and marker type required for coherence. `B` is the second system, and +/// // the other generics are for the input/output types of A and B. +/// /// Chain creates a new system which calls `a`, then calls `b` with the output of `a` +/// pub fn chain( +/// mut a: A, +/// mut b: B, +/// ) -> impl FnMut(In, ParamSet<(SystemParamItem, SystemParamItem)>) -> BOut +/// where +/// // We need A and B to be systems, add those bounds +/// A: SystemParamFunction, +/// B: SystemParamFunction, +/// AParam: SystemParam, +/// BParam: SystemParam, +/// { +/// // The type of `params` is inferred based on the return of this function above +/// move |In(a_in), mut params| { +/// let shared = a.run(a_in, params.p0()); +/// b.run(shared, params.p1()) +/// } +/// } +/// +/// // Usage example for `chain`: +/// fn main() { +/// let mut world = World::default(); +/// world.insert_resource(Message("42".to_string())); +/// +/// // chain the `parse_message_system`'s output into the `filter_system`s input +/// let mut chained_system = IntoSystem::into_system(chain(parse_message, filter)); +/// chained_system.initialize(&mut world); +/// assert_eq!(chained_system.run((), &mut world), Some(42)); +/// } +/// +/// struct Message(String); +/// +/// fn parse_message(message: Res) -> Result { +/// message.0.parse::() +/// } +/// +/// fn filter(In(result): In>) -> Option { +/// result.ok().filter(|&n| n < 100) +/// } +/// ``` +/// [`ChainSystem`]: crate::system::ChainSystem +/// [`ParamSet`]: crate::system::ParamSet pub trait SystemParamFunction: Send + Sync + 'static { - /// # Safety - /// - /// This call might access any of the input parameters in an unsafe way. Make sure the data - /// access is safe in the context of the system scheduler. - unsafe fn run( - &mut self, - input: In, - state: &mut Param::Fetch, - system_meta: &SystemMeta, - world: &World, - change_tick: u32, - ) -> Out; + fn run(&mut self, input: In, param_value: SystemParamItem) -> Out; } macro_rules! impl_system_function { @@ -496,12 +562,13 @@ macro_rules! 
impl_system_function { where for <'a> &'a mut Func: FnMut($($param),*) -> Out + - FnMut($(<<$param as SystemParam>::Fetch as SystemParamFetch>::Item),*) -> Out, Out: 'static + FnMut($(SystemParamItem<$param>),*) -> Out, Out: 'static { #[inline] - unsafe fn run(&mut self, _input: (), state: &mut <($($param,)*) as SystemParam>::Fetch, system_meta: &SystemMeta, world: &World, change_tick: u32) -> Out { - // Yes, this is strange, but rustc fails to compile this impl - // without using this function. + fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out { + // Yes, this is strange, but `rustc` fails to compile this impl + // without using this function. It fails to recognise that `func` + // is a function, potentially because of the multiple impls of `FnMut` #[allow(clippy::too_many_arguments)] fn call_inner( mut f: impl FnMut($($param,)*)->Out, @@ -509,7 +576,7 @@ macro_rules! impl_system_function { )->Out{ f($($param,)*) } - let ($($param,)*) = <<($($param,)*) as SystemParam>::Fetch as SystemParamFetch>::get_param(state, system_meta, world, change_tick); + let ($($param,)*) = param_value; call_inner(self, $($param),*) } } @@ -522,7 +589,7 @@ macro_rules! impl_system_function { FnMut(In, $(<<$param as SystemParam>::Fetch as SystemParamFetch>::Item),*) -> Out, Out: 'static { #[inline] - unsafe fn run(&mut self, input: Input, state: &mut <($($param,)*) as SystemParam>::Fetch, system_meta: &SystemMeta, world: &World, change_tick: u32) -> Out { + fn run(&mut self, input: Input, param_value: SystemParamItem< ($($param,)*)>) -> Out { #[allow(clippy::too_many_arguments)] fn call_inner( mut f: impl FnMut(In, $($param,)*)->Out, @@ -531,13 +598,15 @@ macro_rules! impl_system_function { )->Out{ f(input, $($param,)*) } - let ($($param,)*) = <<($($param,)*) as SystemParam>::Fetch as SystemParamFetch>::get_param(state, system_meta, world, change_tick); + let ($($param,)*) = param_value; call_inner(self, In(input), $($param),*) } } }; } +// Note that we rely on the highest impl to be <= the highest order of the tuple impls +// of `SystemParam` created. all_tuples!(impl_system_function, 0, 16, F); /// Used to implicitly convert systems to their default labels. 
For example, it will convert diff --git a/crates/bevy_ecs/src/system/mod.rs b/crates/bevy_ecs/src/system/mod.rs index b235467d9cc6b..04d14f9afe652 100644 --- a/crates/bevy_ecs/src/system/mod.rs +++ b/crates/bevy_ecs/src/system/mod.rs @@ -83,9 +83,15 @@ pub use system::*; pub use system_chaining::*; pub use system_param::*; +/// Ensure that a given function is a system +/// +/// This should be used when writing doc examples, +/// to confirm that systems used in an example are +/// valid systems pub fn assert_is_system>(sys: S) { if false { // Check it can be converted into a system + // TODO: This should ensure that the system has no conflicting system params IntoSystem::into_system(sys); } } @@ -100,6 +106,7 @@ mod tests { bundle::Bundles, component::{Component, Components}, entity::{Entities, Entity}, + prelude::AnyOf, query::{Added, Changed, Or, With, Without}, schedule::{Schedule, Stage, SystemStage}, system::{ @@ -281,6 +288,65 @@ mod tests { assert_eq!(world.resource::().0, 2); } + #[test] + #[should_panic = "error[B0001]"] + fn option_has_no_filter_with() { + fn sys(_: Query<(Option<&A>, &mut B)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + fn option_doesnt_remove_unrelated_filter_with() { + fn sys(_: Query<(Option<&A>, &mut B, &A)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + #[should_panic = "error[B0001]"] + fn any_of_has_no_filter_with() { + fn sys(_: Query<(AnyOf<(&A, ())>, &mut B)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + fn any_of_has_filter_with_when_both_have_it() { + fn sys(_: Query<(AnyOf<(&A, &A)>, &mut B)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + fn any_of_doesnt_remove_unrelated_filter_with() { + fn sys(_: Query<(AnyOf<(&A, ())>, &mut B, &A)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + #[should_panic = "error[B0001]"] + fn or_has_no_filter_with() { + fn sys(_: Query<&mut B, Or<(With, With)>>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + fn or_has_filter_with_when_both_have_it() { + fn sys(_: Query<&mut B, Or<(With, With)>>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + + #[test] + fn or_doesnt_remove_unrelated_filter_with() { + fn sys(_: Query<&mut B, (Or<(With, With)>, With)>, _: Query<&mut B, Without>) {} + let mut world = World::default(); + run_system(&mut world, sys); + } + #[test] #[should_panic] fn conflicting_query_mut_system() { diff --git a/crates/bevy_ecs/src/system/query.rs b/crates/bevy_ecs/src/system/query.rs index c65c85b487433..fd4ecb408e5e9 100644 --- a/crates/bevy_ecs/src/system/query.rs +++ b/crates/bevy_ecs/src/system/query.rs @@ -3,12 +3,12 @@ use crate::{ entity::Entity, query::{ NopFetch, QueryCombinationIter, QueryEntityError, QueryFetch, QueryItem, QueryIter, - QueryState, ROQueryFetch, ROQueryItem, ReadOnlyFetch, WorldQuery, + QueryManyIter, QuerySingleError, QueryState, ROQueryFetch, ROQueryItem, ReadOnlyWorldQuery, + WorldQuery, }, world::{Mut, World}, }; -use bevy_tasks::TaskPool; -use std::{any::TypeId, fmt::Debug}; +use std::{any::TypeId, borrow::Borrow, fmt::Debug}; /// Provides scoped access to components in a [`World`]. 
/// @@ -388,6 +388,56 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { } } + /// Returns an [`Iterator`] over the query results of a list of [`Entity`]'s. + /// + /// This can only return immutable data (mutable data will be cast to an immutable form). + /// See [`Self::many_for_each_mut`] for queries that contain at least one mutable component. + /// + /// # Examples + /// ``` + /// # use bevy_ecs::prelude::*; + /// #[derive(Component)] + /// struct Counter { + /// value: i32 + /// } + /// + /// #[derive(Component)] + /// struct Friends { + /// list: Vec, + /// } + /// + /// fn system( + /// friends_query: Query<&Friends>, + /// counter_query: Query<&Counter>, + /// ) { + /// for friends in &friends_query { + /// for counter in counter_query.iter_many(&friends.list) { + /// println!("Friend's counter: {:?}", counter.value); + /// } + /// } + /// } + /// # bevy_ecs::system::assert_is_system(system); + /// ``` + #[inline] + pub fn iter_many( + &self, + entities: EntityList, + ) -> QueryManyIter<'_, '_, Q, ROQueryFetch<'_, Q>, F, EntityList::IntoIter> + where + EntityList::Item: Borrow, + { + // SAFETY: system runs without conflicts with other systems. + // same-system queries have runtime borrow checks when they conflict + unsafe { + self.state.iter_many_unchecked_manual( + entities, + self.world, + self.last_change_tick, + self.change_tick, + ) + } + } + /// Returns an [`Iterator`] over the query results. /// /// # Safety @@ -421,6 +471,29 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { ) } + /// Returns an [`Iterator`] over the query results of a list of [`Entity`]'s. + /// + /// If you want safe mutable access to query results of a list of [`Entity`]'s. See [`Self::many_for_each_mut`]. + /// + /// # Safety + /// This allows aliased mutability and does not check for entity uniqueness. + /// You must make sure this call does not result in multiple mutable references to the same component. + /// Particular care must be taken when collecting the data (rather than iterating over it one item at a time) such as via `[Iterator::collect()]`. + pub unsafe fn iter_many_unsafe( + &self, + entities: EntityList, + ) -> QueryManyIter<'_, '_, Q, QueryFetch<'_, Q>, F, EntityList::IntoIter> + where + EntityList::Item: Borrow, + { + self.state.iter_many_unchecked_manual( + entities, + self.world, + self.last_change_tick, + self.change_tick, + ) + } + /// Runs `f` on each query result. This is faster than the equivalent iter() method, but cannot /// be chained like a normal [`Iterator`]. /// @@ -493,7 +566,7 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { }; } - /// Runs `f` on each query result in parallel using the given [`TaskPool`]. + /// Runs `f` on each query result in parallel using the [`World`]'s [`ComputeTaskPool`]. /// /// This can only be called for immutable data, see [`Self::par_for_each_mut`] for /// mutable access. @@ -502,7 +575,7 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// /// The items in the query get sorted into batches. /// Internally, this function spawns a group of futures that each take on a `batch_size` sized section of the items (or less if the division is not perfect). - /// Then, the tasks in the [`TaskPool`] work through these futures. + /// Then, the tasks in the [`ComputeTaskPool`] work through these futures. /// /// You can use this value to tune between maximum multithreading ability (many small batches) and minimum parallelization overhead (few big batches). 
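// A small sketch of the new call shape after the explicit `TaskPool` argument was
// removed: only a batch size and the closure are passed, and the global
// `ComputeTaskPool` is used. `Position` and `report_positions` are illustrative names.
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Position(f32);

fn report_positions(query: Query<&Position>) {
    // 64 items per batch; the ComputeTaskPool must already be initialized,
    // which the regular Bevy app setup does before systems run.
    query.par_for_each(64, |position| {
        let _ = position.0;
    });
}

fn main() {
    // Only checks that this compiles as a valid system; it is never executed here.
    bevy_ecs::system::assert_is_system(report_positions);
}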
/// Rule of thumb: If the function body is (mostly) computationally expensive but there are not many items, a small batch size (=more batches) may help to even out the load. @@ -510,13 +583,17 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// /// # Arguments /// - ///* `task_pool` - The [`TaskPool`] to use ///* `batch_size` - The number of batches to spawn ///* `f` - The function to run on each item in the query + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::prelude::ComputeTaskPool #[inline] pub fn par_for_each<'this>( &'this self, - task_pool: &TaskPool, batch_size: usize, f: impl Fn(ROQueryItem<'this, Q>) + Send + Sync + Clone, ) { @@ -526,7 +603,6 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { self.state .par_for_each_unchecked_manual::, _>( self.world, - task_pool, batch_size, f, self.last_change_tick, @@ -535,12 +611,17 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { }; } - /// Runs `f` on each query result in parallel using the given [`TaskPool`]. + /// Runs `f` on each query result in parallel using the [`World`]'s [`ComputeTaskPool`]. /// See [`Self::par_for_each`] for more details. + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::prelude::ComputeTaskPool #[inline] pub fn par_for_each_mut<'a, FN: Fn(QueryItem<'a, Q>) + Send + Sync + Clone>( &'a mut self, - task_pool: &TaskPool, batch_size: usize, f: FN, ) { @@ -550,12 +631,60 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { self.state .par_for_each_unchecked_manual::, FN>( self.world, - task_pool, batch_size, f, self.last_change_tick, self.change_tick, - ) + ); + }; + } + + /// Calls a closure on each result of [`Query`] where the entities match. + /// # Examples + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// #[derive(Component)] + /// struct Counter { + /// value: i32 + /// } + /// + /// #[derive(Component)] + /// struct Friends { + /// list: Vec, + /// } + /// + /// fn system( + /// friends_query: Query<&Friends>, + /// mut counter_query: Query<&mut Counter>, + /// ) { + /// for friends in &friends_query { + /// counter_query.many_for_each_mut(&friends.list, |mut counter| { + /// println!("Friend's counter: {:?}", counter.value); + /// counter.value += 1; + /// }); + /// } + /// } + /// # bevy_ecs::system::assert_is_system(system); + /// ``` + #[inline] + pub fn many_for_each_mut( + &mut self, + entities: EntityList, + f: impl FnMut(QueryItem<'_, Q>), + ) where + EntityList::Item: Borrow, + { + // SAFE: system runs without conflicts with other systems. 
+ // same-system queries have runtime borrow checks when they conflict + unsafe { + self.state.many_for_each_unchecked_manual( + self.world, + entities, + f, + self.last_change_tick, + self.change_tick, + ); }; } @@ -968,7 +1097,7 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// # use bevy_ecs::system::QuerySingleError; + /// # use bevy_ecs::query::QuerySingleError; /// # #[derive(Component)] /// # struct PlayerScore(i32); /// fn player_scoring_system(query: Query<&PlayerScore>) { @@ -986,17 +1115,14 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// } /// # bevy_ecs::system::assert_is_system(player_scoring_system); /// ``` + #[inline] pub fn get_single(&self) -> Result, QuerySingleError> { - let mut query = self.iter(); - let first = query.next(); - let extra = query.next().is_some(); - - match (first, extra) { - (Some(r), false) => Ok(r), - (None, _) => Err(QuerySingleError::NoEntities(std::any::type_name::())), - (Some(_), _) => Err(QuerySingleError::MultipleEntities(std::any::type_name::< - Self, - >())), + unsafe { + self.state.get_single_unchecked_manual::>( + self.world, + self.last_change_tick, + self.change_tick, + ) } } @@ -1051,17 +1177,14 @@ impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// } /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); /// ``` + #[inline] pub fn get_single_mut(&mut self) -> Result, QuerySingleError> { - let mut query = self.iter_mut(); - let first = query.next(); - let extra = query.next().is_some(); - - match (first, extra) { - (Some(r), false) => Ok(r), - (None, _) => Err(QuerySingleError::NoEntities(std::any::type_name::())), - (Some(_), _) => Err(QuerySingleError::MultipleEntities(std::any::type_name::< - Self, - >())), + unsafe { + self.state.get_single_unchecked_manual::>( + self.world, + self.last_change_tick, + self.change_tick, + ) } } @@ -1183,30 +1306,7 @@ impl std::fmt::Display for QueryComponentError { } } -/// An error that occurs when evaluating a [`Query`] as a single expected resulted via -/// [`Query::single`] or [`Query::single_mut`]. -#[derive(Debug)] -pub enum QuerySingleError { - NoEntities(&'static str), - MultipleEntities(&'static str), -} - -impl std::error::Error for QuerySingleError {} - -impl std::fmt::Display for QuerySingleError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - QuerySingleError::NoEntities(query) => write!(f, "No entities fit the query {}", query), - QuerySingleError::MultipleEntities(query) => { - write!(f, "Multiple entities fit the query {}!", query) - } - } - } -} -impl<'w, 's, Q: WorldQuery, F: WorldQuery> Query<'w, 's, Q, F> -where - QueryFetch<'w, Q>: ReadOnlyFetch, -{ +impl<'w, 's, Q: ReadOnlyWorldQuery, F: WorldQuery> Query<'w, 's, Q, F> { /// Returns the query result for the given [`Entity`], with the actual "inner" world lifetime. 
/// /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is diff --git a/crates/bevy_ecs/src/system/system_param.rs b/crates/bevy_ecs/src/system/system_param.rs index 8b9cf8349d727..96a574dd562df 100644 --- a/crates/bevy_ecs/src/system/system_param.rs +++ b/crates/bevy_ecs/src/system/system_param.rs @@ -6,8 +6,7 @@ use crate::{ component::{Component, ComponentId, ComponentTicks, Components}, entity::{Entities, Entity}, query::{ - Access, FilteredAccess, FilteredAccessSet, QueryFetch, QueryState, ReadOnlyFetch, - WorldQuery, + Access, FilteredAccess, FilteredAccessSet, QueryState, ReadOnlyWorldQuery, WorldQuery, }, system::{CommandQueue, Commands, Query, SystemMeta}, world::{FromWorld, World}, @@ -25,8 +24,27 @@ use std::{ /// /// # Derive /// -/// This trait can be derived with the [`derive@super::SystemParam`] macro. The only requirement -/// is that every struct field must also implement `SystemParam`. +/// This trait can be derived with the [`derive@super::SystemParam`] macro. +/// This macro only works if each field on the derived struct implements [`SystemParam`]. +/// Note: There are additional requirements on the field types. +/// See the *Generic `SystemParam`s* section for details and workarounds of the probable +/// cause if this derive causes an error to be emitted. +/// +/// +/// The struct for which `SystemParam` is derived must (currently) have exactly +/// two lifetime parameters. +/// The first is the lifetime of the world, and the second the lifetime +/// of the parameter's state. +/// +/// ## Attributes +/// +/// `#[system_param(ignore)]`: +/// Can be added to any field in the struct. Fields decorated with this attribute +/// will created with the default value upon realisation. +/// This is most useful for `PhantomData` fields, to ensure that the required lifetimes are +/// used, as shown in the example. +/// +/// # Example /// /// ``` /// # use bevy_ecs::prelude::*; @@ -46,6 +64,30 @@ use std::{ /// /// # bevy_ecs::system::assert_is_system(my_system); /// ``` +/// +/// # Generic `SystemParam`s +/// +/// When using the derive macro, you may see an error in the form of: +/// +/// ```text +/// expected ... [ParamType] +/// found associated type `<<[ParamType] as SystemParam>::Fetch as SystemParamFetch<'_, '_>>::Item` +/// ``` +/// where `[ParamType]` is the type of one of your fields. +/// To solve this error, you can wrap the field of type `[ParamType]` with [`StaticSystemParam`] +/// (i.e. `StaticSystemParam<[ParamType]>`). +/// +/// ## Details +/// +/// The derive macro requires that the [`SystemParam`] implementation of +/// each field `F`'s [`Fetch`](`SystemParam::Fetch`)'s [`Item`](`SystemParamFetch::Item`) is itself `F` +/// (ignoring lifetimes for simplicity). +/// This assumption is due to type inference reasons, so that the derived [`SystemParam`] can be +/// used as an argument to a function system. +/// If the compiler cannot validate this property for `[ParamType]`, it will error in the form shown above. +/// +/// This will most commonly occur when working with `SystemParam`s generically, as the requirement +/// has not been proven to the compiler. pub trait SystemParam: Sized { type Fetch: for<'w, 's> SystemParamFetch<'w, 's>; } @@ -93,10 +135,7 @@ impl<'w, 's, Q: WorldQuery + 'static, F: WorldQuery + 'static> SystemParam for Q } // SAFE: QueryState is constrained to read-only fetches, so it only reads World. 
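// A hedged sketch of the derive rules described above (two lifetimes, every field a
// `SystemParam`, `#[system_param(ignore)]` for default-initialized fields);
// `MyParam` and `my_system` are illustrative names, not part of this patch.
use bevy_ecs::prelude::*;
use bevy_ecs::system::SystemParam;
use std::marker::PhantomData;

#[derive(SystemParam)]
struct MyParam<'w, 's> {
    commands: Commands<'w, 's>,
    entities: Query<'w, 's, Entity>,
    // Ignored fields are default-constructed; the PhantomData keeps both
    // required lifetimes referenced even if the other fields change.
    #[system_param(ignore)]
    _marker: PhantomData<&'w &'s ()>,
}

fn my_system(mut param: MyParam) {
    // Fields on the derived struct are used like the individual parameters they wrap.
    if param.entities.is_empty() {
        param.commands.spawn();
    }
}

fn main() {
    bevy_ecs::system::assert_is_system(my_system);
}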
-unsafe impl ReadOnlySystemParamFetch for QueryState where - for<'x> QueryFetch<'x, Q>: ReadOnlyFetch -{ -} +unsafe impl ReadOnlySystemParamFetch for QueryState {} // SAFE: Relevant query ComponentId and ArchetypeComponentId access is applied to SystemMeta. If // this QueryState conflicts with any prior access, a panic will occur. @@ -1402,11 +1441,11 @@ unsafe impl SystemParamState } fn new_archetype(&mut self, archetype: &Archetype, system_meta: &mut SystemMeta) { - self.0.new_archetype(archetype, system_meta) + self.0.new_archetype(archetype, system_meta); } fn apply(&mut self, world: &mut World) { - self.0.apply(world) + self.0.apply(world); } } diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index c9b6006381a7d..bf2701714190d 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -1,7 +1,7 @@ use crate::{ archetype::{Archetype, ArchetypeId, Archetypes}, bundle::{Bundle, BundleInfo}, - change_detection::Ticks, + change_detection::{MutUntyped, Ticks}, component::{Component, ComponentId, ComponentTicks, Components, StorageType}, entity::{Entities, Entity, EntityLocation}, storage::{SparseSet, Storages}, @@ -103,7 +103,7 @@ impl<'w> EntityRef<'w> { .map(|(value, ticks)| Mut { value: value.assert_unique().deref_mut::(), ticks: Ticks { - component_ticks: &mut *ticks.get(), + component_ticks: ticks.deref_mut(), last_change_tick, change_tick, }, @@ -111,6 +111,23 @@ impl<'w> EntityRef<'w> { } } +impl<'w> EntityRef<'w> { + /// Gets the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityRef::get`], this returns a raw pointer to the component, + /// which is only valid while the `'w` borrow of the lifetime is active. + #[inline] + pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + self.world.components().get_info(component_id)?; + // SAFE: entity_location is valid, component_id is valid as checked by the line above + unsafe { get_component(self.world, component_id, self.entity, self.location) } + } +} + /// A mutable reference to a particular [`Entity`] and all of its components pub struct EntityMut<'w> { world: &'w mut World, @@ -207,7 +224,7 @@ impl<'w> EntityMut<'w> { .map(|(value, ticks)| Mut { value: value.assert_unique().deref_mut::(), ticks: Ticks { - component_ticks: &mut *ticks.get(), + component_ticks: ticks.deref_mut(), last_change_tick: self.world.last_change_tick(), change_tick: self.world.read_change_tick(), }, @@ -488,14 +505,47 @@ impl<'w> EntityMut<'w> { } } +impl<'w> EntityMut<'w> { + /// Gets the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API [`EntityMut::get`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMut::get`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMut`] is alive. + #[inline] + pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + self.world.components().get_info(component_id)?; + // SAFE: entity_location is valid, component_id is valid as checked by the line above + unsafe { get_component(self.world, component_id, self.entity, self.location) } + } + + /// Gets a [`MutUntyped`] of the component of the given [`ComponentId`] from the entity. 
+ /// + /// **You should prefer to use the typed API [`EntityMut::get_mut`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMut::get_mut`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMut`] is alive. + #[inline] + pub fn get_mut_by_id(&mut self, component_id: ComponentId) -> Option> { + self.world.components().get_info(component_id)?; + // SAFE: entity_location is valid, component_id is valid as checked by the line above + unsafe { get_mut_by_id(self.world, self.entity, self.location, component_id) } + } +} + // TODO: move to Storages? /// Get a raw pointer to a particular [`Component`] on a particular [`Entity`] in the provided [`World`]. /// /// # Safety -/// `entity_location` must be within bounds of the given archetype and `entity` must exist inside +/// - `entity_location` must be within bounds of the given archetype and `entity` must exist inside /// the archetype +/// - `component_id` must be valid #[inline] -unsafe fn get_component( +pub(crate) unsafe fn get_component( world: &World, component_id: ComponentId, entity: Entity, @@ -808,7 +858,7 @@ pub(crate) unsafe fn get_mut( |(value, ticks)| Mut { value: value.assert_unique().deref_mut::(), ticks: Ticks { - component_ticks: &mut *ticks.get(), + component_ticks: ticks.deref_mut(), last_change_tick, change_tick, }, @@ -816,8 +866,33 @@ pub(crate) unsafe fn get_mut( ) } +// SAFETY: EntityLocation must be valid, component_id must be valid +#[inline] +pub(crate) unsafe fn get_mut_by_id( + world: &mut World, + entity: Entity, + location: EntityLocation, + component_id: ComponentId, +) -> Option { + // SAFE: world access is unique, entity location and component_id required to be valid + get_component_and_ticks(world, component_id, entity, location).map(|(value, ticks)| { + MutUntyped { + value: value.assert_unique(), + ticks: Ticks { + component_ticks: ticks.deref_mut(), + last_change_tick: world.last_change_tick(), + change_tick: world.read_change_tick(), + }, + } + }) +} + #[cfg(test)] mod tests { + use crate as bevy_ecs; + use crate::component::ComponentId; + use crate::prelude::*; // for the `#[derive(Component)]` + #[test] fn sorted_remove() { let mut a = vec![1, 2, 3, 4, 5, 6, 7]; @@ -838,4 +913,70 @@ mod tests { assert_eq!(a, vec![1]); } + + #[derive(Component)] + struct TestComponent(u32); + + #[test] + fn entity_ref_get_by_id() { + let mut world = World::new(); + let entity = world.spawn().insert(TestComponent(42)).id(); + let component_id = world + .components() + .get_id(std::any::TypeId::of::()) + .unwrap(); + + let entity = world.entity(entity); + let test_component = entity.get_by_id(component_id).unwrap(); + // SAFE: points to a valid `TestComponent` + let test_component = unsafe { test_component.deref::() }; + + assert_eq!(test_component.0, 42); + } + + #[test] + fn entity_mut_get_by_id() { + let mut world = World::new(); + let entity = world.spawn().insert(TestComponent(42)).id(); + let component_id = world + .components() + .get_id(std::any::TypeId::of::()) + .unwrap(); + + let mut entity_mut = world.entity_mut(entity); + let mut test_component = entity_mut.get_mut_by_id(component_id).unwrap(); + { + test_component.set_changed(); + // SAFE: `test_component` has unique access of the `EntityMut` and is not used afterwards + let test_component = + unsafe { test_component.into_inner().deref_mut::() }; + test_component.0 = 43; + } + + let entity = world.entity(entity); + let 
test_component = entity.get_by_id(component_id).unwrap(); + let test_component = unsafe { test_component.deref::() }; + + assert_eq!(test_component.0, 43); + } + + #[test] + fn entity_ref_get_by_id_invalid_component_id() { + let invalid_component_id = ComponentId::new(usize::MAX); + + let mut world = World::new(); + let entity = world.spawn().id(); + let entity = world.entity(entity); + assert!(entity.get_by_id(invalid_component_id).is_none()); + } + + #[test] + fn entity_mut_get_by_id_invalid_component_id() { + let invalid_component_id = ComponentId::new(usize::MAX); + + let mut world = World::new(); + let mut entity = world.spawn(); + assert!(entity.get_by_id(invalid_component_id).is_none()); + assert!(entity.get_mut_by_id(invalid_component_id).is_none()); + } } diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index bdcd809bd9e23..b6e447e8dcd84 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -10,14 +10,16 @@ pub use world_cell::*; use crate::{ archetype::{ArchetypeComponentId, ArchetypeComponentInfo, ArchetypeId, Archetypes}, bundle::{Bundle, BundleInserter, BundleSpawner, Bundles}, - change_detection::Ticks, - component::{Component, ComponentId, ComponentTicks, Components, StorageType}, + change_detection::{MutUntyped, Ticks}, + component::{ + Component, ComponentDescriptor, ComponentId, ComponentTicks, Components, StorageType, + }, entity::{AllocAtWithoutReplacement, Entities, Entity}, query::{QueryState, WorldQuery}, storage::{Column, SparseSet, Storages}, system::Resource, }; -use bevy_ptr::{OwningPtr, UnsafeCellDeref}; +use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; use bevy_utils::tracing::debug; use std::{ any::TypeId, @@ -179,6 +181,14 @@ impl World { self.components.init_component::(&mut self.storages) } + pub fn init_component_with_descriptor( + &mut self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + self.components + .init_component_with_descriptor(&mut self.storages, descriptor) + } + /// Retrieves an [`EntityRef`] that exposes read-only operations for the given `entity`. /// This will panic if the `entity` does not exist. Use [`World::get_entity`] if you want /// to check for entity existence instead of implicitly panic-ing. @@ -1157,8 +1167,43 @@ impl World { } } + /// Inserts a new resource with the given `value`. Will replace the value if it already existed. 
+ /// + /// **You should prefer to use the typed API [`World::insert_resource`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + /// /// # Safety - /// `component_id` must be valid and correspond to a resource component of type `R` + /// The value referenced by `value` must be valid for the given [`ComponentId`] of this world + pub unsafe fn insert_resource_by_id( + &mut self, + component_id: ComponentId, + value: OwningPtr<'_>, + ) { + let change_tick = self.change_tick(); + + self.components().get_info(component_id).unwrap_or_else(|| { + panic!( + "insert_resource_by_id called with component id which doesn't exist in this world" + ) + }); + // SAFE: component_id is valid, checked by the lines above + let column = self.initialize_resource_internal(component_id); + if column.is_empty() { + // SAFE: column is of type R and has been allocated above + column.push(value, ComponentTicks::new(change_tick)); + } else { + let ptr = column.get_data_unchecked_mut(0); + std::ptr::copy_nonoverlapping::<u8>( + value.as_ptr(), + ptr.as_ptr(), + column.item_layout().size(), + ); + column.get_ticks_unchecked_mut(0).set_changed(change_tick); + } + } + + /// # Safety + /// `component_id` must be valid for this world #[inline] unsafe fn initialize_resource_internal(&mut self, component_id: ComponentId) -> &mut Column { // SAFE: resource archetype always exists @@ -1225,6 +1270,14 @@ impl World { ); } + pub(crate) fn validate_non_send_access_untyped(&self, name: &str) { + assert!( + self.main_thread_validator.is_main_thread(), + "attempted to access NonSend resource {} off of the main thread", + name + ); + } + /// Empties queued entities and adds them to the empty [Archetype](crate::archetype::Archetype). /// This should be called before doing operations that might operate on queued entities, /// such as inserting a [Component]. @@ -1282,6 +1335,120 @@ impl World { } } +impl World { + /// Gets a pointer to the resource with the id [`ComponentId`] if it exists. + /// The returned pointer must not be used to modify the resource, and must not be + /// dereferenced after the immutable borrow of the [`World`] ends. + /// + /// **You should prefer to use the typed API [`World::get_resource`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + #[inline] + pub fn get_resource_by_id(&self, component_id: ComponentId) -> Option<Ptr<'_>> { + let info = self.components.get_info(component_id)?; + if !info.is_send_and_sync() { + self.validate_non_send_access_untyped(info.name()); + } + + let column = self.get_populated_resource_column(component_id)?; + Some(column.get_data_ptr()) + } + + /// Gets a pointer to the resource with the id [`ComponentId`] if it exists. + /// The returned pointer may be used to modify the resource, as long as the mutable borrow + /// of the [`World`] is still valid. 
+ /// + /// **You should prefer to use the typed API [`World::get_resource_mut`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + #[inline] + pub fn get_resource_mut_by_id(&mut self, component_id: ComponentId) -> Option<MutUntyped<'_>> { + let info = self.components.get_info(component_id)?; + if !info.is_send_and_sync() { + self.validate_non_send_access_untyped(info.name()); + } + + let column = self.get_populated_resource_column(component_id)?; + + // SAFE: get_data_ptr requires that the mutability rules are not violated, and the caller promises + // to only modify the resource while the mutable borrow of the world is valid + let ticks = Ticks { + // - index is in-bounds because the column is initialized and non-empty + // - no other reference to the ticks of the same row can exist at the same time + component_ticks: unsafe { &mut *column.get_ticks_unchecked(0).get() }, + last_change_tick: self.last_change_tick(), + change_tick: self.read_change_tick(), + }; + + Some(MutUntyped { + value: unsafe { column.get_data_ptr().assert_unique() }, + ticks, + }) + } + + /// Removes the resource of a given type, if it exists. Otherwise returns [None]. + /// + /// **You should prefer to use the typed API [`World::remove_resource`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + pub fn remove_resource_by_id(&mut self, component_id: ComponentId) -> Option<()> { + let info = self.components.get_info(component_id)?; + if !info.is_send_and_sync() { + self.validate_non_send_access_untyped(info.name()); + } + + let resource_archetype = self.archetypes.resource_mut(); + let unique_components = resource_archetype.unique_components_mut(); + let column = unique_components.get_mut(component_id)?; + if column.is_empty() { + return None; + } + // SAFE: if a resource column exists, row 0 exists as well + unsafe { column.swap_remove_unchecked(0) }; + + Some(()) + } + + /// Retrieves an untyped reference to the given `entity`'s [Component] of the given [`ComponentId`]. + /// Returns [None] if the `entity` does not have a [Component] of the given type. + /// + /// **You should prefer to use the typed API [`World::get`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + #[inline] + pub fn get_by_id(&self, entity: Entity, component_id: ComponentId) -> Option<Ptr<'_>> { + self.components().get_info(component_id)?; + // SAFE: entity_location is valid, component_id is valid as checked by the line above + unsafe { + get_component( + self, + component_id, + entity, + self.get_entity(entity)?.location(), + ) + } + } + + /// Retrieves a mutable untyped reference to the given `entity`'s [Component] of the given [`ComponentId`]. + /// Returns [None] if the `entity` does not have a [Component] of the given type. 
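// A hedged sketch of the untyped, `ComponentId`-keyed access added on `World` itself,
// mirroring the `EntityRef`/`EntityMut` tests earlier in this patch; `Health` is an
// illustrative component, and the `deref` calls assume the id really refers to `Health`.
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Health(u32);

fn main() {
    let mut world = World::new();
    let entity = world.spawn().insert(Health(10)).id();
    let component_id = world
        .components()
        .get_id(std::any::TypeId::of::<Health>())
        .unwrap();

    // Untyped read; the pointer is only valid while the immutable borrow lasts.
    let ptr = world.get_by_id(entity, component_id).unwrap();
    // SAFE: `component_id` was looked up from `Health`, so the pointee is a `Health`.
    assert_eq!(unsafe { ptr.deref::<Health>() }.0, 10);

    // Untyped write through `MutUntyped`, flagging change detection by hand.
    let mut untyped = world.get_mut_by_id(entity, component_id).unwrap();
    untyped.set_changed();
    // SAFE: same reasoning as above, and the mutable borrow is unique.
    unsafe { untyped.into_inner().deref_mut::<Health>() }.0 = 15;

    assert_eq!(world.get::<Health>(entity).unwrap().0, 15);
}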
+ /// + /// **You should prefer to use the typed API [`World::get_mut`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + #[inline] + pub fn get_mut_by_id( + &mut self, + entity: Entity, + component_id: ComponentId, + ) -> Option> { + self.components().get_info(component_id)?; + // SAFE: entity_location is valid, component_id is valid as checked by the line above + unsafe { + get_mut_by_id( + self, + entity, + self.get_entity(entity)?.location(), + component_id, + ) + } + } +} + impl fmt::Debug for World { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("World") @@ -1338,11 +1505,16 @@ impl Default for MainThreadValidator { #[cfg(test)] mod tests { use super::World; + use crate::{ + change_detection::DetectChanges, + component::{ComponentDescriptor, ComponentId, StorageType}, + ptr::OwningPtr, + }; use bevy_ecs_macros::Component; use std::{ panic, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU32, Ordering}, Arc, Mutex, }, }; @@ -1462,4 +1634,97 @@ mod tests { ] ); } + + #[derive(Component)] + struct TestResource(u32); + + #[test] + fn get_resource_by_id() { + let mut world = World::new(); + world.insert_resource(TestResource(42)); + let component_id = world + .components() + .get_resource_id(std::any::TypeId::of::()) + .unwrap(); + + let resource = world.get_resource_by_id(component_id).unwrap(); + let resource = unsafe { resource.deref::() }; + + assert_eq!(resource.0, 42); + } + + #[test] + fn get_resource_mut_by_id() { + let mut world = World::new(); + world.insert_resource(TestResource(42)); + let component_id = world + .components() + .get_resource_id(std::any::TypeId::of::()) + .unwrap(); + + { + let mut resource = world.get_resource_mut_by_id(component_id).unwrap(); + resource.set_changed(); + let resource = unsafe { resource.into_inner().deref_mut::() }; + resource.0 = 43; + } + + let resource = world.get_resource_by_id(component_id).unwrap(); + let resource = unsafe { resource.deref::() }; + + assert_eq!(resource.0, 43); + } + + #[test] + fn custom_resource_with_layout() { + static DROP_COUNT: AtomicU32 = AtomicU32::new(0); + + let mut world = World::new(); + + // SAFE: the drop function is valid for the layout and the data will be safe to access from any thread + let descriptor = unsafe { + ComponentDescriptor::new_with_layout( + "Custom Test Component".to_string(), + StorageType::Table, + std::alloc::Layout::new::<[u8; 8]>(), + Some(|ptr| { + let data = ptr.read::<[u8; 8]>(); + assert_eq!(data, [0, 1, 2, 3, 4, 5, 6, 7]); + DROP_COUNT.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + }), + ) + }; + + let component_id = world.init_component_with_descriptor(descriptor); + + let value: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; + OwningPtr::make(value, |ptr| unsafe { + // SAFE: value is valid for the component layout + world.insert_resource_by_id(component_id, ptr); + }); + + let data = unsafe { + world + .get_resource_by_id(component_id) + .unwrap() + .deref::<[u8; 8]>() + }; + assert_eq!(*data, [0, 1, 2, 3, 4, 5, 6, 7]); + + assert!(world.remove_resource_by_id(component_id).is_some()); + + assert_eq!(DROP_COUNT.load(std::sync::atomic::Ordering::SeqCst), 1); + } + + #[test] + #[should_panic = "insert_resource_by_id called with component id which doesn't exist in this world"] + fn insert_resource_by_id_invalid_component_id() { + let invalid_component_id = ComponentId::new(usize::MAX); + + let mut world = World::new(); + OwningPtr::make((), |ptr| unsafe { + // SAFE: ptr must be 
valid for the component_id `invalid_component_id` which is invalid, but checked by `insert_resource_by_id` + world.insert_resource_by_id(invalid_component_id, ptr); + }); + } } diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index 8ca0bc8d75068..002e30a2b3f3a 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -3,6 +3,7 @@ use crate::{ entity::Entity, world::World, }; +use std::iter::FusedIterator; pub struct SpawnBatchIter<'w, I> where @@ -84,3 +85,10 @@ where self.inner.len() } } + +impl FusedIterator for SpawnBatchIter<'_, I> +where + I: FusedIterator, + T: Bundle, +{ +} diff --git a/crates/bevy_ecs/src/world/world_cell.rs b/crates/bevy_ecs/src/world/world_cell.rs index 5a1fff6cdf9a2..ba4d0b701103d 100644 --- a/crates/bevy_ecs/src/world/world_cell.rs +++ b/crates/bevy_ecs/src/world/world_cell.rs @@ -420,12 +420,11 @@ mod tests { } #[test] - #[should_panic] fn world_cell_ref_and_ref() { let mut world = World::default(); world.insert_resource(1u32); let cell = world.cell(); - let _value_a = cell.resource_mut::(); + let _value_a = cell.resource::(); let _value_b = cell.resource::(); } } diff --git a/crates/bevy_ecs_compile_fail_tests/Cargo.toml b/crates/bevy_ecs_compile_fail_tests/Cargo.toml index 7fa3316d360ae..e48c4c03e3954 100644 --- a/crates/bevy_ecs_compile_fail_tests/Cargo.toml +++ b/crates/bevy_ecs_compile_fail_tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_ecs_compile_fail_tests" -version = "0.8.0-dev" +version = "0.1.0" edition = "2021" description = "Compile fail tests for Bevy Engine's entity component system" homepage = "https://bevyengine.org" @@ -9,5 +9,5 @@ license = "MIT OR Apache-2.0" publish = false [dev-dependencies] -bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } +bevy_ecs = { path = "../bevy_ecs" } trybuild = "1.0" diff --git a/crates/bevy_ecs_compile_fail_tests/tests/ui/system_param_derive_readonly.stderr b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_param_derive_readonly.stderr index 870e2fb3131c5..a6da80b259cda 100644 --- a/crates/bevy_ecs_compile_fail_tests/tests/ui/system_param_derive_readonly.stderr +++ b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_param_derive_readonly.stderr @@ -6,12 +6,13 @@ warning: unused import: `SystemState` | = note: `#[warn(unused_imports)]` on by default -error[E0277]: the trait bound `for<'x> WriteFetch<'x, Foo>: ReadOnlyFetch` is not satisfied +error[E0277]: the trait bound `&'static mut Foo: ReadOnlyWorldQuery` is not satisfied --> tests/ui/system_param_derive_readonly.rs:18:5 | 18 | assert_readonly::(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `for<'x> ReadOnlyFetch` is not implemented for `WriteFetch<'x, Foo>` + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `ReadOnlyWorldQuery` is not implemented for `&'static mut Foo` | + = note: `ReadOnlyWorldQuery` is implemented for `&'static Foo`, but not for `&'static mut Foo` = note: required because of the requirements on the impl of `ReadOnlySystemParamFetch` for `QueryState<&'static mut Foo>` = note: 2 redundant requirements hidden = note: required because of the requirements on the impl of `ReadOnlySystemParamFetch` for `_::FetchState<(QueryState<&'static mut Foo>,)>` diff --git a/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.rs b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.rs new file mode 100644 index 0000000000000..e35ba6dbc57d2 --- /dev/null +++ 
b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.rs @@ -0,0 +1,14 @@ +use bevy_ecs::prelude::*; + +#[derive(Component)] +struct A(usize); + +fn system(mut query: Query<&mut A>, e: Res) { + let mut results = Vec::new(); + query.many_for_each_mut(vec![*e, *e], |a| { + // this should fail to compile + results.push(a); + }); +} + +fn main() {} diff --git a/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.stderr b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.stderr new file mode 100644 index 0000000000000..b37e88e932473 --- /dev/null +++ b/crates/bevy_ecs_compile_fail_tests/tests/ui/system_query_many_for_each_mut_lifetime_safety.stderr @@ -0,0 +1,10 @@ +error[E0521]: borrowed data escapes outside of closure + --> tests/ui/system_query_many_for_each_mut_lifetime_safety.rs:10:9 + | +7 | let mut results = Vec::new(); + | ----------- `results` declared here, outside of the closure body +8 | query.many_for_each_mut(vec![*e, *e], |a| { + | - `a` is a reference that is only valid in the closure body +9 | // this should fail to compile +10 | results.push(a); + | ^^^^^^^^^^^^^^^ `a` escapes the closure body here diff --git a/crates/bevy_ecs_compile_fail_tests/tests/ui/world_query_derive.stderr b/crates/bevy_ecs_compile_fail_tests/tests/ui/world_query_derive.stderr index 6ced5ae9c789c..397c5685b6a28 100644 --- a/crates/bevy_ecs_compile_fail_tests/tests/ui/world_query_derive.stderr +++ b/crates/bevy_ecs_compile_fail_tests/tests/ui/world_query_derive.stderr @@ -1,8 +1,8 @@ -error[E0277]: the trait bound `WriteFetch<'_, Foo>: ReadOnlyFetch` is not satisfied - --> tests/ui/world_query_derive.rs:7:10 +error[E0277]: the trait bound `&'static mut Foo: ReadOnlyWorldQuery` is not satisfied + --> tests/ui/world_query_derive.rs:9:8 | -7 | #[derive(WorldQuery)] - | ^^^^^^^^^^ the trait `ReadOnlyFetch` is not implemented for `WriteFetch<'_, Foo>` +9 | a: &'static mut Foo, + | ^^^^^^^^^^^^^^^^ the trait `ReadOnlyWorldQuery` is not implemented for `&'static mut Foo` | note: required by a bound in `_::assert_readonly` --> tests/ui/world_query_derive.rs:7:10 @@ -11,11 +11,11 @@ note: required by a bound in `_::assert_readonly` | ^^^^^^^^^^ required by this bound in `_::assert_readonly` = note: this error originates in the derive macro `WorldQuery` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `MutableMarkedFetch<'_>: ReadOnlyFetch` is not satisfied - --> tests/ui/world_query_derive.rs:18:10 +error[E0277]: the trait bound `MutableMarked: ReadOnlyWorldQuery` is not satisfied + --> tests/ui/world_query_derive.rs:20:8 | -18 | #[derive(WorldQuery)] - | ^^^^^^^^^^ the trait `ReadOnlyFetch` is not implemented for `MutableMarkedFetch<'_>` +20 | a: MutableMarked, + | ^^^^^^^^^^^^^ the trait `ReadOnlyWorldQuery` is not implemented for `MutableMarked` | note: required by a bound in `_::assert_readonly` --> tests/ui/world_query_derive.rs:18:10 diff --git a/crates/bevy_encase_derive/Cargo.toml b/crates/bevy_encase_derive/Cargo.toml new file mode 100644 index 0000000000000..16e518a573a1d --- /dev/null +++ b/crates/bevy_encase_derive/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "bevy_encase_derive" +version = "0.8.0-dev" +edition = "2021" +description = "Bevy derive macro for encase" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + +[lib] +proc-macro = true + 
+[dependencies] +bevy_macro_utils = { path = "../bevy_macro_utils", version = "0.8.0-dev" } +encase_derive_impl = "0.2" diff --git a/crates/bevy_encase_derive/src/lib.rs b/crates/bevy_encase_derive/src/lib.rs new file mode 100644 index 0000000000000..0ef5c43817358 --- /dev/null +++ b/crates/bevy_encase_derive/src/lib.rs @@ -0,0 +1,40 @@ +use bevy_macro_utils::BevyManifest; +use encase_derive_impl::{implement, syn}; + +const BEVY: &str = "bevy"; +const BEVY_RENDER: &str = "bevy_render"; +const ENCASE: &str = "encase"; + +fn bevy_encase_path() -> syn::Path { + let bevy_manifest = BevyManifest::default(); + bevy_manifest + .maybe_get_path(BEVY) + .map(|bevy_path| { + let mut segments = bevy_path.segments; + segments.push(BevyManifest::parse_str("render")); + syn::Path { + leading_colon: None, + segments, + } + }) + .or_else(|| bevy_manifest.maybe_get_path(BEVY_RENDER)) + .map(|bevy_render_path| { + let mut segments = bevy_render_path.segments; + segments.push(BevyManifest::parse_str("render_resource")); + syn::Path { + leading_colon: None, + segments, + } + }) + .map(|path| { + let mut segments = path.segments; + segments.push(BevyManifest::parse_str(ENCASE)); + syn::Path { + leading_colon: None, + segments, + } + }) + .unwrap_or_else(|| bevy_manifest.get_path(ENCASE)) +} + +implement!(bevy_encase_path()); diff --git a/crates/bevy_gilrs/Cargo.toml b/crates/bevy_gilrs/Cargo.toml index f67ce8794cfdb..1bcd71340cf17 100644 --- a/crates/bevy_gilrs/Cargo.toml +++ b/crates/bevy_gilrs/Cargo.toml @@ -16,4 +16,4 @@ bevy_input = { path = "../bevy_input", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } # other -gilrs = { version = "0.8.0", features = ["wasm-bindgen"] } +gilrs = "0.9.0" diff --git a/crates/bevy_gltf/Cargo.toml b/crates/bevy_gltf/Cargo.toml index c2cb4bcf6dfd0..5d9ab53ad21c6 100644 --- a/crates/bevy_gltf/Cargo.toml +++ b/crates/bevy_gltf/Cargo.toml @@ -14,6 +14,7 @@ bevy_animation = { path = "../bevy_animation", version = "0.8.0-dev", optional = bevy_app = { path = "../bevy_app", version = "0.8.0-dev" } bevy_asset = { path = "../bevy_asset", version = "0.8.0-dev" } bevy_core = { path = "../bevy_core", version = "0.8.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.8.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.8.0-dev" } bevy_log = { path = "../bevy_log", version = "0.8.0-dev" } @@ -23,6 +24,7 @@ bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["b bevy_render = { path = "../bevy_render", version = "0.8.0-dev" } bevy_scene = { path = "../bevy_scene", version = "0.8.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.8.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } # other diff --git a/crates/bevy_gltf/src/loader.rs b/crates/bevy_gltf/src/loader.rs index 5965109062947..39891699345e7 100644 --- a/crates/bevy_gltf/src/loader.rs +++ b/crates/bevy_gltf/src/loader.rs @@ -3,6 +3,7 @@ use bevy_asset::{ AssetIoError, AssetLoader, AssetPath, BoxedFuture, Handle, LoadContext, LoadedAsset, }; use bevy_core::Name; +use bevy_core_pipeline::prelude::Camera3d; use bevy_ecs::{entity::Entity, prelude::FromWorld, world::World}; use bevy_hierarchy::{BuildWorldChildren, WorldChildBuilder}; use bevy_log::warn; @@ -13,7 +14,7 @@ use bevy_pbr::{ }; use bevy_render::{ camera::{ - Camera, Camera3d, CameraProjection, 
OrthographicProjection, PerspectiveProjection, + Camera, CameraRenderGraph, OrthographicProjection, PerspectiveProjection, Projection, ScalingMode, }, color::Color, @@ -24,10 +25,12 @@ use bevy_render::{ primitives::{Aabb, Frustum}, render_resource::{AddressMode, Face, FilterMode, PrimitiveTopology, SamplerDescriptor}, renderer::RenderDevice, - texture::{CompressedImageFormats, Image, ImageType, TextureError}, + texture::{CompressedImageFormats, Image, ImageSampler, ImageType, TextureError}, view::VisibleEntities, }; use bevy_scene::Scene; +#[cfg(not(target_arch = "wasm32"))] +use bevy_tasks::IoTaskPool; use bevy_transform::{components::Transform, TransformBundle}; use bevy_utils::{HashMap, HashSet}; @@ -62,6 +65,8 @@ pub enum GltfError { AssetIoError(#[from] AssetIoError), #[error("Missing sampler for animation {0}")] MissingAnimationSampler(usize), + #[error("failed to generate tangents: {0}")] + GenerateTangentsError(#[from] bevy_render::mesh::GenerateTangentsError), } /// Loads glTF files with all of their data as their corresponding bevy representations. @@ -250,13 +255,6 @@ async fn load_gltf<'a, 'b>( mesh.insert_attribute(Mesh::ATTRIBUTE_NORMAL, vertex_attribute); } - if let Some(vertex_attribute) = reader - .read_tangents() - .map(|v| VertexAttributeValues::Float32x4(v.collect())) - { - mesh.insert_attribute(Mesh::ATTRIBUTE_TANGENT, vertex_attribute); - } - if let Some(vertex_attribute) = reader .read_tex_coords(0) .map(|v| VertexAttributeValues::Float32x2(v.into_f32().collect())) @@ -309,6 +307,25 @@ async fn load_gltf<'a, 'b>( } } + if let Some(vertex_attribute) = reader + .read_tangents() + .map(|v| VertexAttributeValues::Float32x4(v.collect())) + { + mesh.insert_attribute(Mesh::ATTRIBUTE_TANGENT, vertex_attribute); + } else if mesh.attribute(Mesh::ATTRIBUTE_NORMAL).is_some() + && primitive.material().normal_texture().is_some() + { + bevy_log::debug!( + "Missing vertex tangents, computing them using the mikktspace algorithm" + ); + if let Err(err) = mesh.generate_tangents() { + bevy_log::warn!( + "Failed to generate vertex tangents using the mikktspace algorithm: {:?}", + err + ); + } + } + let mesh = load_context.set_labeled_asset(&primitive_label, LoadedAsset::new(mesh)); primitives.push(super::GltfPrimitive { mesh, @@ -318,6 +335,7 @@ async fn load_gltf<'a, 'b>( .and_then(|i| materials.get(i).cloned()), }); } + let handle = load_context.set_labeled_asset( &mesh_label(&mesh), LoadedAsset::new(super::GltfMesh { primitives }), @@ -394,8 +412,7 @@ async fn load_gltf<'a, 'b>( } } else { #[cfg(not(target_arch = "wasm32"))] - load_context - .task_pool() + IoTaskPool::get() .scope(|scope| { gltf.textures().for_each(|gltf_texture| { let linear_textures = &linear_textures; @@ -444,6 +461,7 @@ async fn load_gltf<'a, 'b>( let mut scenes = vec![]; let mut named_scenes = HashMap::default(); + let mut active_camera_found = false; for scene in gltf.scenes() { let mut err = None; let mut world = World::default(); @@ -462,6 +480,7 @@ async fn load_gltf<'a, 'b>( &buffer_data, &mut node_index_to_entity_map, &mut entity_to_skin_index_map, + &mut active_camera_found, ); if result.is_err() { err = Some(result); @@ -601,7 +620,7 @@ async fn load_texture<'a>( )? 
} }; - texture.sampler_descriptor = texture_sampler(&gltf_texture); + texture.sampler_descriptor = ImageSampler::Descriptor(texture_sampler(&gltf_texture)); Ok((texture, texture_label(&gltf_texture))) } @@ -613,55 +632,45 @@ fn load_material(material: &Material, load_context: &mut LoadContext) -> Handle< let pbr = material.pbr_metallic_roughness(); let color = pbr.base_color_factor(); - let base_color_texture = if let Some(info) = pbr.base_color_texture() { + let base_color_texture = pbr.base_color_texture().map(|info| { // TODO: handle info.tex_coord() (the *set* index for the right texcoords) let label = texture_label(&info.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); - Some(load_context.get_handle(path)) - } else { - None - }; + load_context.get_handle(path) + }); let normal_map_texture: Option> = - if let Some(normal_texture) = material.normal_texture() { + material.normal_texture().map(|normal_texture| { // TODO: handle normal_texture.scale // TODO: handle normal_texture.tex_coord() (the *set* index for the right texcoords) let label = texture_label(&normal_texture.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); - Some(load_context.get_handle(path)) - } else { - None - }; + load_context.get_handle(path) + }); - let metallic_roughness_texture = if let Some(info) = pbr.metallic_roughness_texture() { + let metallic_roughness_texture = pbr.metallic_roughness_texture().map(|info| { // TODO: handle info.tex_coord() (the *set* index for the right texcoords) let label = texture_label(&info.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); - Some(load_context.get_handle(path)) - } else { - None - }; + load_context.get_handle(path) + }); - let occlusion_texture = if let Some(occlusion_texture) = material.occlusion_texture() { + let occlusion_texture = material.occlusion_texture().map(|occlusion_texture| { // TODO: handle occlusion_texture.tex_coord() (the *set* index for the right texcoords) // TODO: handle occlusion_texture.strength() (a scalar multiplier for occlusion strength) let label = texture_label(&occlusion_texture.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); - Some(load_context.get_handle(path)) - } else { - None - }; + load_context.get_handle(path) + }); let emissive = material.emissive_factor(); - let emissive_texture = if let Some(info) = material.emissive_texture() { + let emissive_texture = material.emissive_texture().map(|info| { // TODO: handle occlusion_texture.tex_coord() (the *set* index for the right texcoords) // TODO: handle occlusion_texture.strength() (a scalar multiplier for occlusion strength) let label = texture_label(&info.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); - Some(load_context.get_handle(path)) - } else { - None - }; + load_context.get_handle(path) + }); load_context.set_labeled_asset( &material_label, @@ -696,6 +705,7 @@ fn load_node( buffer_data: &[Vec], node_index_to_entity_map: &mut HashMap, entity_to_skin_index_map: &mut HashMap, + active_camera_found: &mut bool, ) -> Result<(), GltfError> { let transform = gltf_node.transform(); let mut gltf_error = None; @@ -713,30 +723,18 @@ fn load_node( // create camera node if let Some(camera) = gltf_node.camera() { - node.insert_bundle(( - VisibleEntities { - ..Default::default() - }, - Frustum::default(), - )); - - match camera.projection() { + let projection = match camera.projection() { gltf::camera::Projection::Orthographic(orthographic) => { let xmag = 
orthographic.xmag(); let orthographic_projection: OrthographicProjection = OrthographicProjection { far: orthographic.zfar(), near: orthographic.znear(), - scaling_mode: ScalingMode::FixedHorizontal, - scale: xmag / 2.0, + scaling_mode: ScalingMode::FixedHorizontal(1.0), + scale: xmag, ..Default::default() }; - node.insert(Camera { - projection_matrix: orthographic_projection.get_projection_matrix(), - ..Default::default() - }); - node.insert(orthographic_projection); - node.insert(Camera3d); + Projection::Orthographic(orthographic_projection) } gltf::camera::Projection::Perspective(perspective) => { let mut perspective_projection: PerspectiveProjection = PerspectiveProjection { @@ -750,14 +748,23 @@ fn load_node( if let Some(aspect_ratio) = perspective.aspect_ratio() { perspective_projection.aspect_ratio = aspect_ratio; } - node.insert(Camera { - projection_matrix: perspective_projection.get_projection_matrix(), - ..Default::default() - }); - node.insert(perspective_projection); - node.insert(Camera3d); + Projection::Perspective(perspective_projection) } - } + }; + + node.insert_bundle(( + projection, + Camera { + is_active: !*active_camera_found, + ..Default::default() + }, + VisibleEntities::default(), + Frustum::default(), + Camera3d::default(), + CameraRenderGraph::new(bevy_core_pipeline::core_3d::graph::NAME), + )); + + *active_camera_found = true; } // Map node index to entity @@ -870,6 +877,7 @@ fn load_node( buffer_data, node_index_to_entity_map, entity_to_skin_index_map, + active_camera_found, ) { gltf_error = Some(err); return; diff --git a/crates/bevy_hierarchy/src/child_builder.rs b/crates/bevy_hierarchy/src/child_builder.rs index 3dd47da3878a6..54d65646cf357 100644 --- a/crates/bevy_hierarchy/src/child_builder.rs +++ b/crates/bevy_hierarchy/src/child_builder.rs @@ -1,11 +1,13 @@ -use crate::prelude::{Children, Parent, PreviousParent}; +use smallvec::SmallVec; + use bevy_ecs::{ bundle::Bundle, entity::Entity, system::{Command, Commands, EntityCommands}, world::{EntityMut, World}, }; -use smallvec::SmallVec; + +use crate::prelude::{Children, Parent, PreviousParent}; /// Command that adds a child to an entity #[derive(Debug)] @@ -165,7 +167,40 @@ impl<'w, 's, 'a> ChildBuilder<'w, 's, 'a> { /// Trait defining how to build children pub trait BuildChildren { /// Creates a [`ChildBuilder`] with the given children built in the given closure + /// + /// Compared to [`add_children`][BuildChildren::add_children], this method returns self + /// to allow chaining. fn with_children(&mut self, f: impl FnOnce(&mut ChildBuilder)) -> &mut Self; + /// Creates a [`ChildBuilder`] with the given children built in the given closure + /// + /// Compared to [`with_children`][BuildChildren::with_children], this method returns the + /// the value returned from the closure, but doesn't allow chaining. 
+ /// + /// ## Example + /// + /// ```no_run + /// # use bevy_ecs::prelude::*; + /// # use bevy_hierarchy::*; + /// # + /// # #[derive(Component)] + /// # struct SomethingElse; + /// # + /// # #[derive(Component)] + /// # struct MoreStuff; + /// # + /// # fn foo(mut commands: Commands) { + /// let mut parent_commands = commands.spawn(); + /// let child_id = parent_commands.add_children(|parent| { + /// parent.spawn().id() + /// }); + /// + /// parent_commands.insert(SomethingElse); + /// commands.entity(child_id).with_children(|parent| { + /// parent.spawn().insert(MoreStuff); + /// }); + /// # } + /// ``` + fn add_children(&mut self, f: impl FnOnce(&mut ChildBuilder) -> T) -> T; /// Pushes children to the back of the builder's children fn push_children(&mut self, children: &[Entity]) -> &mut Self; /// Inserts children at the given index @@ -178,21 +213,25 @@ pub trait BuildChildren { impl<'w, 's, 'a> BuildChildren for EntityCommands<'w, 's, 'a> { fn with_children(&mut self, spawn_children: impl FnOnce(&mut ChildBuilder)) -> &mut Self { + self.add_children(spawn_children); + self + } + + fn add_children(&mut self, spawn_children: impl FnOnce(&mut ChildBuilder) -> T) -> T { let parent = self.id(); - let push_children = { - let mut builder = ChildBuilder { - commands: self.commands(), - push_children: PushChildren { - children: SmallVec::default(), - parent, - }, - }; - spawn_children(&mut builder); - builder.push_children + let mut builder = ChildBuilder { + commands: self.commands(), + push_children: PushChildren { + children: SmallVec::default(), + parent, + }, }; - self.commands().add(push_children); - self + let result = spawn_children(&mut builder); + let children = builder.push_children; + self.commands().add(children); + + result } fn push_children(&mut self, children: &[Entity]) -> &mut Self { @@ -460,15 +499,18 @@ impl<'w> BuildWorldChildren for WorldChildBuilder<'w> { #[cfg(test)] mod tests { - use super::{BuildChildren, BuildWorldChildren}; - use crate::prelude::{Children, Parent, PreviousParent}; + use smallvec::{smallvec, SmallVec}; + use bevy_ecs::{ component::Component, entity::Entity, system::{CommandQueue, Commands}, world::World, }; - use smallvec::{smallvec, SmallVec}; + + use crate::prelude::{Children, Parent, PreviousParent}; + + use super::{BuildChildren, BuildWorldChildren}; #[derive(Component)] struct C(u32); @@ -479,12 +521,13 @@ mod tests { let mut queue = CommandQueue::default(); let mut commands = Commands::new(&mut queue, &world); - let mut children = Vec::new(); let parent = commands.spawn().insert(C(1)).id(); - commands.entity(parent).with_children(|parent| { - children.push(parent.spawn().insert(C(2)).id()); - children.push(parent.spawn().insert(C(3)).id()); - children.push(parent.spawn().insert(C(4)).id()); + let children = commands.entity(parent).add_children(|parent| { + [ + parent.spawn().insert(C(2)).id(), + parent.spawn().insert(C(3)).id(), + parent.spawn().insert(C(4)).id(), + ] }); queue.apply(&mut world); diff --git a/crates/bevy_hierarchy/src/components/children.rs b/crates/bevy_hierarchy/src/components/children.rs index 96097a1da8a59..d6c1e42604504 100644 --- a/crates/bevy_hierarchy/src/components/children.rs +++ b/crates/bevy_hierarchy/src/components/children.rs @@ -4,6 +4,7 @@ use bevy_ecs::{ reflect::{ReflectComponent, ReflectMapEntities}, }; use bevy_reflect::Reflect; +use core::slice; use smallvec::SmallVec; use std::ops::Deref; @@ -41,3 +42,13 @@ impl Deref for Children { &self.0[..] 
} } + +impl<'a> IntoIterator for &'a Children { + type Item = ::Item; + + type IntoIter = slice::Iter<'a, Entity>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} diff --git a/crates/bevy_hierarchy/src/components/parent.rs b/crates/bevy_hierarchy/src/components/parent.rs index ddc0e2a634ee6..525e56bdecd75 100644 --- a/crates/bevy_hierarchy/src/components/parent.rs +++ b/crates/bevy_hierarchy/src/components/parent.rs @@ -25,7 +25,11 @@ impl FromWorld for Parent { impl MapEntities for Parent { fn map_entities(&mut self, entity_map: &EntityMap) -> Result<(), MapEntitiesError> { - self.0 = entity_map.get(self.0)?; + // Parent of an entity in the new world can be in outside world, in which case it + // should not be mapped. + if let Ok(mapped_entity) = entity_map.get(self.0) { + self.0 = mapped_entity; + } Ok(()) } } @@ -51,7 +55,11 @@ pub struct PreviousParent(pub(crate) Entity); impl MapEntities for PreviousParent { fn map_entities(&mut self, entity_map: &EntityMap) -> Result<(), MapEntitiesError> { - self.0 = entity_map.get(self.0)?; + // PreviousParent of an entity in the new world can be in outside world, in which + // case it should not be mapped. + if let Ok(mapped_entity) = entity_map.get(self.0) { + self.0 = mapped_entity; + } Ok(()) } } diff --git a/crates/bevy_input/src/input.rs b/crates/bevy_input/src/input.rs index 0c654af0ce92e..86f102dcb5b0b 100644 --- a/crates/bevy_input/src/input.rs +++ b/crates/bevy_input/src/input.rs @@ -82,6 +82,12 @@ where } } + /// Registers a release for all currently pressed inputs. + pub fn release_all(&mut self) { + // Move all items from pressed into just_released + self.just_released.extend(self.pressed.drain()); + } + /// Returns `true` if the `input` has just been pressed. pub fn just_pressed(&self, input: T) -> bool { self.just_pressed.contains(&input) @@ -123,7 +129,18 @@ where self.just_released.remove(&input); } + /// Clears the `pressed`, `just_pressed`, and `just_released` data for every input. + /// + /// See also [`Input::clear`] for simulating elapsed time steps. + pub fn reset_all(&mut self) { + self.pressed.clear(); + self.just_pressed.clear(); + self.just_released.clear(); + } + /// Clears the `just pressed` and `just released` data for every input. + /// + /// See also [`Input::reset_all`] for a full reset. 
pub fn clear(&mut self) { self.just_pressed.clear(); self.just_released.clear(); @@ -197,6 +214,17 @@ mod test { assert!(input.just_released.contains(&DummyInput::Input1)); } + #[test] + fn test_release_all() { + let mut input = Input::default(); + input.press(DummyInput::Input1); + input.press(DummyInput::Input2); + input.release_all(); + assert!(input.pressed.is_empty()); + assert!(input.just_released.contains(&DummyInput::Input1)); + assert!(input.just_released.contains(&DummyInput::Input2)); + } + #[test] fn test_just_pressed() { let mut input = Input::default(); @@ -284,6 +312,22 @@ mod test { assert!(!input.just_released(DummyInput::Input1)); } + #[test] + fn test_reset_all() { + let mut input = Input::default(); + + input.press(DummyInput::Input1); + input.press(DummyInput::Input2); + input.release(DummyInput::Input2); + assert!(input.pressed.contains(&DummyInput::Input1)); + assert!(input.just_pressed.contains(&DummyInput::Input1)); + assert!(input.just_released.contains(&DummyInput::Input2)); + input.reset_all(); + assert!(input.pressed.is_empty()); + assert!(input.just_pressed.is_empty()); + assert!(input.just_released.is_empty()); + } + #[test] fn test_clear() { let mut input = Input::default(); diff --git a/crates/bevy_input/src/mouse.rs b/crates/bevy_input/src/mouse.rs index c88c0295abffa..c8dc432094824 100644 --- a/crates/bevy_input/src/mouse.rs +++ b/crates/bevy_input/src/mouse.rs @@ -56,7 +56,7 @@ pub struct MouseMotion { /// /// The value of the event can either be interpreted as the amount of lines or the amount of pixels /// to scroll. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum MouseScrollUnit { /// The line scroll unit. /// diff --git a/crates/bevy_input/src/touch.rs b/crates/bevy_input/src/touch.rs index 4ba55c0118967..7333ac2d3eacc 100644 --- a/crates/bevy_input/src/touch.rs +++ b/crates/bevy_input/src/touch.rs @@ -224,6 +224,11 @@ impl Touches { self.pressed.get(&id) } + /// Checks if any touch input was just pressed. + pub fn any_just_pressed(&self) -> bool { + !self.just_pressed.is_empty() + } + /// Returns `true` if the input corresponding to the `id` has just been pressed. pub fn just_pressed(&self, id: u64) -> bool { self.just_pressed.contains_key(&id) @@ -239,6 +244,11 @@ impl Touches { self.just_released.get(&id) } + /// Checks if any touch input was just released. + pub fn any_just_released(&self) -> bool { + !self.just_released.is_empty() + } + /// Returns `true` if the input corresponding to the `id` has just been released. pub fn just_released(&self, id: u64) -> bool { self.just_released.contains_key(&id) @@ -249,6 +259,11 @@ impl Touches { self.just_released.values() } + /// Checks if any touch input was just cancelled. + pub fn any_just_cancelled(&self) -> bool { + !self.just_cancelled.is_empty() + } + /// Returns `true` if the input corresponding to the `id` has just been cancelled. pub fn just_cancelled(&self, id: u64) -> bool { self.just_cancelled.contains_key(&id) @@ -259,6 +274,11 @@ impl Touches { self.just_cancelled.values() } + /// Retrieves the position of the first currently pressed touch, if any + pub fn first_pressed_position(&self) -> Option { + self.pressed.values().next().map(|t| t.position) + } + /// Processes a [`TouchInput`] event by updating the `pressed`, `just_pressed`, /// `just_released`, and `just_cancelled` collections. 
fn process_touch_event(&mut self, event: &TouchInput) { diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index da855436ff301..7fccda37863d0 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -12,14 +12,14 @@ categories = ["game-engines", "graphics", "gui", "rendering"] [features] trace = [ "bevy_app/trace", - "bevy_core_pipeline/trace", + "bevy_core_pipeline?/trace", "bevy_ecs/trace", "bevy_log/trace", - "bevy_render/trace", + "bevy_render?/trace", "bevy_hierarchy/trace" ] trace_chrome = [ "bevy_log/tracing-chrome" ] -trace_tracy = ["bevy_render/tracing-tracy", "bevy_log/tracing-tracy" ] +trace_tracy = ["bevy_render?/tracing-tracy", "bevy_log/tracing-tracy" ] wgpu_trace = ["bevy_render/wgpu_trace"] debug_asset_server = ["bevy_asset/debug_asset_server"] @@ -55,13 +55,13 @@ x11 = ["bevy_winit/x11"] subpixel_glyph_atlas = ["bevy_text/subpixel_glyph_atlas"] # Optimise for WebGL2 -webgl = ["bevy_pbr/webgl", "bevy_render/webgl"] +webgl = ["bevy_core_pipeline?/webgl", "bevy_pbr?/webgl", "bevy_render?/webgl"] # enable systems that allow for automated testing on CI bevy_ci_testing = ["bevy_app/bevy_ci_testing", "bevy_render/ci_limits"] # Enable animation support, and glTF animation loading -animation = ["bevy_animation", "bevy_gltf/bevy_animation"] +animation = ["bevy_animation", "bevy_gltf?/bevy_animation"] [dependencies] # bevy @@ -78,6 +78,7 @@ bevy_math = { path = "../bevy_math", version = "0.8.0-dev" } bevy_ptr = { path = "../bevy_ptr", version = "0.8.0-dev" } bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["bevy"] } bevy_scene = { path = "../bevy_scene", version = "0.8.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.8.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } bevy_window = { path = "../bevy_window", version = "0.8.0-dev" } @@ -97,4 +98,6 @@ bevy_winit = { path = "../bevy_winit", optional = true, version = "0.8.0-dev" } bevy_gilrs = { path = "../bevy_gilrs", optional = true, version = "0.8.0-dev" } [target.'cfg(target_os = "android")'.dependencies] -ndk-glue = {version = "0.6", features = ["logger"]} +# This version *must* be the same as the version used by winit, +# or Android will break: https://github.com/rust-windowing/winit#android +ndk-glue = {version = "0.5", features = ["logger"]} diff --git a/crates/bevy_internal/src/default_plugins.rs b/crates/bevy_internal/src/default_plugins.rs index d5a6df4fd68ef..7bdb4c8af4ae4 100644 --- a/crates/bevy_internal/src/default_plugins.rs +++ b/crates/bevy_internal/src/default_plugins.rs @@ -3,6 +3,7 @@ use bevy_app::{PluginGroup, PluginGroupBuilder}; /// This plugin group will add all the default plugins: /// * [`LogPlugin`](bevy_log::LogPlugin) /// * [`CorePlugin`](bevy_core::CorePlugin) +/// * [`TimePlugin`](bevy_time::TimePlugin) /// * [`TransformPlugin`](bevy_transform::TransformPlugin) /// * [`HierarchyPlugin`](bevy_hierarchy::HierarchyPlugin) /// * [`DiagnosticsPlugin`](bevy_diagnostic::DiagnosticsPlugin) @@ -27,6 +28,7 @@ impl PluginGroup for DefaultPlugins { fn build(&mut self, group: &mut PluginGroupBuilder) { group.add(bevy_log::LogPlugin::default()); group.add(bevy_core::CorePlugin::default()); + group.add(bevy_time::TimePlugin::default()); group.add(bevy_transform::TransformPlugin::default()); group.add(bevy_hierarchy::HierarchyPlugin::default()); group.add(bevy_diagnostic::DiagnosticsPlugin::default()); @@ -76,6 +78,7 @@ impl 
PluginGroup for DefaultPlugins { /// Minimal plugin group that will add the following plugins: /// * [`CorePlugin`](bevy_core::CorePlugin) +/// * [`TimePlugin`](bevy_time::TimePlugin) /// * [`ScheduleRunnerPlugin`](bevy_app::ScheduleRunnerPlugin) /// /// See also [`DefaultPlugins`] for a more complete set of plugins @@ -84,6 +87,7 @@ pub struct MinimalPlugins; impl PluginGroup for MinimalPlugins { fn build(&mut self, group: &mut PluginGroupBuilder) { group.add(bevy_core::CorePlugin::default()); + group.add(bevy_time::TimePlugin::default()); group.add(bevy_app::ScheduleRunnerPlugin::default()); } } diff --git a/crates/bevy_internal/src/lib.rs b/crates/bevy_internal/src/lib.rs index 95d1cb35e234c..cc63909ab3cb8 100644 --- a/crates/bevy_internal/src/lib.rs +++ b/crates/bevy_internal/src/lib.rs @@ -18,7 +18,7 @@ pub mod asset { } pub mod core { - //! Contains core plugins and utilities for time. + //! Contains core plugins. pub use bevy_core::*; } @@ -70,6 +70,11 @@ pub mod tasks { pub use bevy_tasks::*; } +pub mod time { + //! Contains time utilities. + pub use bevy_time::*; +} + pub mod hierarchy { //! Entity hierarchies and property inheritance pub use bevy_hierarchy::*; diff --git a/crates/bevy_internal/src/prelude.rs b/crates/bevy_internal/src/prelude.rs index 2a9b4553ec97d..fd2cb9d848327 100644 --- a/crates/bevy_internal/src/prelude.rs +++ b/crates/bevy_internal/src/prelude.rs @@ -2,7 +2,8 @@ pub use crate::{ app::prelude::*, asset::prelude::*, core::prelude::*, ecs::prelude::*, hierarchy::prelude::*, input::prelude::*, log::prelude::*, math::prelude::*, reflect::prelude::*, scene::prelude::*, - transform::prelude::*, utils::prelude::*, window::prelude::*, DefaultPlugins, MinimalPlugins, + time::prelude::*, transform::prelude::*, utils::prelude::*, window::prelude::*, DefaultPlugins, + MinimalPlugins, }; pub use bevy_derive::{bevy_main, Deref, DerefMut}; diff --git a/crates/bevy_log/Cargo.toml b/crates/bevy_log/Cargo.toml index 163a07a196458..186784f24a92f 100644 --- a/crates/bevy_log/Cargo.toml +++ b/crates/bevy_log/Cargo.toml @@ -17,7 +17,7 @@ bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } tracing-subscriber = {version = "0.3.1", features = ["registry", "env-filter"]} tracing-chrome = { version = "0.4.0", optional = true } -tracing-tracy = { version = "0.8.0", optional = true } +tracing-tracy = { version = "0.10.0", optional = true } tracing-log = "0.1.2" tracing-error = { version = "0.2.0", optional = true } diff --git a/crates/bevy_mikktspace/Cargo.toml b/crates/bevy_mikktspace/Cargo.toml new file mode 100644 index 0000000000000..c456b3008850f --- /dev/null +++ b/crates/bevy_mikktspace/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "bevy_mikktspace" +version = "0.8.0-dev" +edition = "2021" +authors = ["Benjamin Wasty ", "David Harvey-Macaulay ", "Layl Bongers "] +description = "Mikkelsen tangent space algorithm" +documentation = "https://docs.rs/bevy" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "Zlib AND (MIT OR Apache-2.0)" +keywords = ["bevy", "3D", "graphics", "algorithm", "tangent"] + +[dependencies] +glam = "0.20.0" + +[[example]] +name = "generate" diff --git a/crates/bevy_mikktspace/LICENSE-APACHE b/crates/bevy_mikktspace/LICENSE-APACHE new file mode 100644 index 0000000000000..1b5ec8b78e237 --- /dev/null +++ b/crates/bevy_mikktspace/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 
+ +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/crates/bevy_mikktspace/LICENSE-MIT b/crates/bevy_mikktspace/LICENSE-MIT new file mode 100644 index 0000000000000..16b0f84d02846 --- /dev/null +++ b/crates/bevy_mikktspace/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2017 The mikktspace Library Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/crates/bevy_mikktspace/README.md b/crates/bevy_mikktspace/README.md new file mode 100644 index 0000000000000..b5f886ce7abf5 --- /dev/null +++ b/crates/bevy_mikktspace/README.md @@ -0,0 +1,35 @@ +# bevy_mikktspace + +This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update maths crate dependencies in lock-step with bevy releases. It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). + +Port of the [Mikkelsen Tangent Space Algorithm](https://en.blender.org/index.php/Dev:Shading/Tangent_Space_Normal_Maps) reference implementation. + +Requires at least Rust 1.52.1. + +## Examples + +### generate + +Demonstrates generating tangents for a cube with 4 triangular faces per side. + +```sh +cargo run --example generate +``` + +## License agreement + +Licensed under either of + +* Apache License, Version 2.0 + ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) +* MIT license + ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) + +at your option. AND parts of the code are licensed under: + +* Zlib license + [https://opensource.org/licenses/Zlib](https://opensource.org/licenses/Zlib) + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/crates/bevy_mikktspace/examples/cube.obj b/crates/bevy_mikktspace/examples/cube.obj new file mode 100644 index 0000000000000..c0cf02efda305 --- /dev/null +++ b/crates/bevy_mikktspace/examples/cube.obj @@ -0,0 +1,114 @@ +v 0.5 -0.5 0.5 +v 0.5 -0.5 -0.5 +v 0.5 0.5 -0.5 +v 0.5 0.5 0.5 +v 0.5 0 0 +v -0.5 0.5 0.5 +v -0.5 0.5 -0.5 +v -0.5 -0.5 -0.5 +v -0.5 -0.5 0.5 +v -0.5 0 0 +v 0.5 0.5 0.5 +v 0.5 0.5 -0.5 +v -0.5 0.5 -0.5 +v -0.5 0.5 0.5 +v 0 0.5 0 +v -0.5 -0.5 0.5 +v -0.5 -0.5 -0.5 +v 0.5 -0.5 -0.5 +v 0.5 -0.5 0.5 +v 0 -0.5 0 +v -0.5 0.5 0.5 +v -0.5 -0.5 0.5 +v 0.5 -0.5 0.5 +v 0.5 0.5 0.5 +v 0 0 0.5 +v 0.5 0.5 -0.5 +v 0.5 -0.5 -0.5 +v -0.5 -0.5 -0.5 +v -0.5 0.5 -0.5 +v 0 0 -0.5 +vn 0.57735026 -0.57735026 0.57735026 +vn 0.57735026 -0.57735026 -0.57735026 +vn 0.57735026 0.57735026 -0.57735026 +vn 0.57735026 0.57735026 0.57735026 +vn 1 0 0 +vn -0.57735026 0.57735026 0.57735026 +vn -0.57735026 0.57735026 -0.57735026 +vn -0.57735026 -0.57735026 -0.57735026 +vn -0.57735026 -0.57735026 0.57735026 +vn -1 0 0 +vn 0.57735026 0.57735026 0.57735026 +vn 0.57735026 0.57735026 -0.57735026 +vn -0.57735026 0.57735026 -0.57735026 +vn -0.57735026 0.57735026 0.57735026 +vn 0 1 0 +vn -0.57735026 -0.57735026 0.57735026 +vn -0.57735026 -0.57735026 -0.57735026 +vn 0.57735026 -0.57735026 -0.57735026 +vn 0.57735026 -0.57735026 0.57735026 +vn 0 -1 0 +vn -0.57735026 0.57735026 0.57735026 +vn -0.57735026 -0.57735026 0.57735026 +vn 0.57735026 -0.57735026 0.57735026 +vn 0.57735026 0.57735026 0.57735026 +vn 0 0 1 +vn 0.57735026 0.57735026 -0.57735026 +vn 0.57735026 -0.57735026 -0.57735026 +vn -0.57735026 -0.57735026 -0.57735026 +vn -0.57735026 0.57735026 -0.57735026 +vn 0 0 -1 +vt 0 0 +vt 0 1 +vt 1 1 +vt 1 0 +vt 0.5 0.5 +vt 1 0 +vt 1 1 +vt 0 1 +vt 0 0 +vt 0.5 0.5 +vt 0 0 +vt 0 1 +vt 0 1 +vt 0 0 +vt 0 0.5 +vt 0 0 +vt 0 1 +vt 0 1 +vt 0 0 +vt 0 0.5 +vt 0 0 +vt 0 1 +vt 1 1 +vt 1 0 +vt 0.5 0.5 +vt 1 0 +vt 1 1 +vt 0 1 +vt 0 0 +vt 0.5 0.5 +f 1/1/1 2/2/2 5/5/5 +f 2/2/2 3/3/3 5/5/5 +f 3/3/3 4/4/4 5/5/5 +f 4/4/4 1/1/1 5/5/5 +f 6/6/6 7/7/7 10/10/10 +f 7/7/7 8/8/8 10/10/10 +f 8/8/8 9/9/9 10/10/10 +f 9/9/9 6/6/6 10/10/10 +f 11/11/11 12/12/12 15/15/15 +f 12/12/12 13/13/13 15/15/15 +f 13/13/13 14/14/14 15/15/15 +f 14/14/14 11/11/11 15/15/15 +f 16/16/16 17/17/17 20/20/20 +f 17/17/17 18/18/18 20/20/20 +f 18/18/18 19/19/19 20/20/20 +f 19/19/19 16/16/16 20/20/20 +f 21/21/21 22/22/22 25/25/25 +f 22/22/22 23/23/23 25/25/25 +f 23/23/23 24/24/24 25/25/25 +f 24/24/24 21/21/21 25/25/25 +f 26/26/26 27/27/27 30/30/30 +f 27/27/27 28/28/28 30/30/30 +f 28/28/28 29/29/29 30/30/30 +f 29/29/29 26/26/26 30/30/30 diff --git a/crates/bevy_mikktspace/examples/generate.rs b/crates/bevy_mikktspace/examples/generate.rs new file mode 100644 index 0000000000000..a8cefb8809ff4 --- /dev/null +++ b/crates/bevy_mikktspace/examples/generate.rs @@ -0,0 +1,259 @@ +#![allow(clippy::bool_assert_comparison, clippy::useless_conversion)] + +use glam::{Vec2, Vec3}; + +pub type Face = [u32; 3]; + +#[derive(Debug)] +struct Vertex { + position: Vec3, + normal: Vec3, + tex_coord: Vec2, +} + +struct Mesh { + faces: Vec, + vertices: Vec, +} + +fn vertex(mesh: &Mesh, face: usize, vert: usize) -> &Vertex { + let vs: &[u32; 3] = &mesh.faces[face]; + &mesh.vertices[vs[vert] as usize] +} + +impl bevy_mikktspace::Geometry for Mesh { + fn num_faces(&self) -> usize { + self.faces.len() + } + + fn num_vertices_of_face(&self, _face: usize) -> usize { + 3 + } + + fn position(&self, face: usize, vert: usize) -> [f32; 3] { + vertex(self, face, vert).position.into() + } + + fn 
normal(&self, face: usize, vert: usize) -> [f32; 3] { + vertex(self, face, vert).normal.into() + } + + fn tex_coord(&self, face: usize, vert: usize) -> [f32; 2] { + vertex(self, face, vert).tex_coord.into() + } + + fn set_tangent_encoded(&mut self, tangent: [f32; 4], face: usize, vert: usize) { + println!( + "{face}-{vert}: v: {v:?}, vn: {vn:?}, vt: {vt:?}, vx: {vx:?}", + face = face, + vert = vert, + v = vertex(self, face, vert).position, + vn = vertex(self, face, vert).normal, + vt = vertex(self, face, vert).tex_coord, + vx = tangent, + ); + } +} + +fn make_cube() -> Mesh { + struct ControlPoint { + uv: [f32; 2], + dir: [f32; 3], + } + let mut faces = Vec::new(); + let mut ctl_pts = Vec::new(); + let mut vertices = Vec::new(); + + // +x plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 1.0], + dir: [1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 0.0], + dir: [1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.5, 0.5], + dir: [1.0, 0.0, 0.0], + }); + } + + // -x plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [1.0, 0.0], + dir: [-1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 1.0], + dir: [-1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [-1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [-1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.5, 0.5], + dir: [-1.0, 0.0, 0.0], + }); + } + + // +y plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [-1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [-1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.5], + dir: [0.0, 1.0, 0.0], + }); + } + + // -y plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [-1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [-1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.5], + dir: [0.0, -1.0, 0.0], + }); + } + + // +z plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: 
[-1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [-1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 1.0], + dir: [1.0, -1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 0.0], + dir: [1.0, 1.0, 1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.5, 0.5], + dir: [0.0, 0.0, 1.0], + }); + } + + // -z plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint { + uv: [1.0, 0.0], + dir: [1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [1.0, 1.0], + dir: [1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 1.0], + dir: [-1.0, -1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.0, 0.0], + dir: [-1.0, 1.0, -1.0], + }); + ctl_pts.push(ControlPoint { + uv: [0.5, 0.5], + dir: [0.0, 0.0, -1.0], + }); + } + + for pt in ctl_pts { + let p: Vec3 = pt.dir.into(); + let n: Vec3 = p.normalize(); + let t: Vec2 = pt.uv.into(); + vertices.push(Vertex { + position: (p / 2.0).into(), + normal: n.into(), + tex_coord: t.into(), + }); + } + + Mesh { faces, vertices } +} + +fn main() { + let mut cube = make_cube(); + let ret = bevy_mikktspace::generate_tangents(&mut cube); + assert_eq!(true, ret); +} diff --git a/crates/bevy_mikktspace/src/generated.rs b/crates/bevy_mikktspace/src/generated.rs new file mode 100644 index 0000000000000..c05b8c1566ee3 --- /dev/null +++ b/crates/bevy_mikktspace/src/generated.rs @@ -0,0 +1,1809 @@ +//! Everything in this module is pending to be refactored, turned into idiomatic-rust, and moved to +//! other modules. + +//! The contents of this file are a combination of transpilation and human +//! modification to Morten S. Mikkelsen's original tangent space algorithm +//! implementation written in C. The original source code can be found at +//! +//! and includes the following licence: +//! +//! Copyright (C) 2011 by Morten S. Mikkelsen +//! +//! This software is provided 'as-is', without any express or implied +//! warranty. In no event will the authors be held liable for any damages +//! arising from the use of this software. +//! +//! Permission is granted to anyone to use this software for any purpose, +//! including commercial applications, and to alter it and redistribute it +//! freely, subject to the following restrictions: +//! +//! 1. The origin of this software must not be misrepresented; you must not +//! claim that you wrote the original software. If you use this software +//! in a product, an acknowledgment in the product documentation would be +//! appreciated but is not required. +//! +//! 2. Altered source versions must be plainly marked as such, and must not be +//! misrepresented as being the original software. +//! +//! 3. This notice may not be removed or altered from any source distribution. 
+ +#![allow( + clippy::all, + clippy::doc_markdown, + clippy::redundant_else, + clippy::match_same_arms, + clippy::semicolon_if_nothing_returned, + clippy::explicit_iter_loop, + clippy::map_flatten, + dead_code, + mutable_transmutes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unused_mut, + unused_assignments, + unused_variables +)] + +use std::ptr::null_mut; + +use glam::Vec3; + +use crate::{face_vert_to_index, get_normal, get_position, get_tex_coord, Geometry}; + +#[derive(Copy, Clone)] +pub struct STSpace { + pub vOs: Vec3, + pub fMagS: f32, + pub vOt: Vec3, + pub fMagT: f32, + pub iCounter: i32, + pub bOrient: bool, +} + +impl STSpace { + pub fn zero() -> Self { + Self { + vOs: Default::default(), + fMagS: 0.0, + vOt: Default::default(), + fMagT: 0.0, + iCounter: 0, + bOrient: false, + } + } +} + +// To avoid visual errors (distortions/unwanted hard edges in lighting), when using sampled normal maps, the +// normal map sampler must use the exact inverse of the pixel shader transformation. +// The most efficient transformation we can possibly do in the pixel shader is +// achieved by using, directly, the "unnormalized" interpolated tangent, bitangent and vertex normal: vT, vB and vN. +// pixel shader (fast transform out) +// vNout = normalize( vNt.x * vT + vNt.y * vB + vNt.z * vN ); +// where vNt is the tangent space normal. The normal map sampler must likewise use the +// interpolated and "unnormalized" tangent, bitangent and vertex normal to be compliant with the pixel shader. +// sampler does (exact inverse of pixel shader): +// float3 row0 = cross(vB, vN); +// float3 row1 = cross(vN, vT); +// float3 row2 = cross(vT, vB); +// float fSign = dot(vT, row0)<0 ? -1 : 1; +// vNt = normalize( fSign * float3(dot(vNout,row0), dot(vNout,row1), dot(vNout,row2)) ); +// where vNout is the sampled normal in some chosen 3D space. +// +// Should you choose to reconstruct the bitangent in the pixel shader instead +// of the vertex shader, as explained earlier, then be sure to do this in the normal map sampler also. +// Finally, beware of quad triangulations. If the normal map sampler doesn't use the same triangulation of +// quads as your renderer then problems will occur since the interpolated tangent spaces will differ +// eventhough the vertex level tangent spaces match. This can be solved either by triangulating before +// sampling/exporting or by using the order-independent choice of diagonal for splitting quads suggested earlier. +// However, this must be used both by the sampler and your tools/rendering pipeline. 
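[Editor's note] The comment above spells out the pixel-shader reconstruction this algorithm assumes, `vNout = normalize(vNt.x * vT + vNt.y * vB + vNt.z * vN)`. As a CPU-side reference, here is a small sketch of that transform using the same `glam` types this file already depends on; the function names and the glTF-style bitangent reconstruction are illustrative and are not part of the vendored code.

```rust
use glam::Vec3;

/// Illustrative helper: applies a tangent-space normal `n_ts` using the
/// unnormalized interpolated tangent `t`, bitangent `b`, and vertex normal `n`,
/// mirroring the formula in the comment above.
fn apply_tangent_space_normal(n_ts: Vec3, t: Vec3, b: Vec3, n: Vec3) -> Vec3 {
    (n_ts.x * t + n_ts.y * b + n_ts.z * n).normalize()
}

/// Illustrative helper: reconstructs the bitangent from an encoded tangent,
/// using the glTF-style convention bitangent = cross(normal, tangent.xyz) * tangent.w.
fn reconstruct_bitangent(normal: Vec3, tangent: [f32; 4]) -> Vec3 {
    let t = Vec3::new(tangent[0], tangent[1], tangent[2]);
    normal.cross(t) * tangent[3]
}
```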
+// internal structure + +#[derive(Copy, Clone)] +pub struct STriInfo { + pub FaceNeighbors: [i32; 3], + pub AssignedGroup: [*mut SGroup; 3], + pub vOs: Vec3, + pub vOt: Vec3, + pub fMagS: f32, + pub fMagT: f32, + pub iOrgFaceNumber: i32, + pub iFlag: i32, + pub iTSpacesOffs: i32, + pub vert_num: [u8; 4], +} + +impl STriInfo { + fn zero() -> Self { + Self { + FaceNeighbors: [0, 0, 0], + AssignedGroup: [null_mut(), null_mut(), null_mut()], + vOs: Default::default(), + vOt: Default::default(), + fMagS: 0.0, + fMagT: 0.0, + iOrgFaceNumber: 0, + iFlag: 0, + iTSpacesOffs: 0, + vert_num: [0, 0, 0, 0], + } + } +} + +#[derive(Copy, Clone)] +pub struct SGroup { + pub iNrFaces: i32, + pub pFaceIndices: *mut i32, + pub iVertexRepresentitive: i32, + pub bOrientPreservering: bool, +} + +impl SGroup { + fn zero() -> Self { + Self { + iNrFaces: 0, + pFaceIndices: null_mut(), + iVertexRepresentitive: 0, + bOrientPreservering: false, + } + } +} + +#[derive(Clone)] +pub struct SSubGroup { + pub iNrFaces: i32, + pub pTriMembers: Vec, +} + +impl SSubGroup { + fn zero() -> Self { + Self { + iNrFaces: 0, + pTriMembers: Vec::new(), + } + } +} + +#[derive(Copy, Clone)] +pub union SEdge { + pub unnamed: unnamed, + pub array: [i32; 3], +} + +impl SEdge { + fn zero() -> Self { + Self { array: [0, 0, 0] } + } +} + +#[derive(Copy, Clone)] +pub struct unnamed { + pub i0: i32, + pub i1: i32, + pub f: i32, +} + +#[derive(Copy, Clone)] +pub struct STmpVert { + pub vert: [f32; 3], + pub index: i32, +} + +impl STmpVert { + fn zero() -> Self { + Self { + vert: [0.0, 0.0, 0.0], + index: 0, + } + } +} + +pub unsafe fn genTangSpace(geometry: &mut I, fAngularThreshold: f32) -> bool { + let mut iNrTrianglesIn = 0; + let mut f = 0; + let mut t = 0; + let mut i = 0; + let mut iNrTSPaces = 0; + let mut iTotTris = 0; + let mut iDegenTriangles = 0; + let mut iNrMaxGroups = 0; + let mut iNrActiveGroups: i32 = 0i32; + let mut index = 0; + let iNrFaces = geometry.num_faces(); + let mut bRes: bool = false; + let fThresCos: f32 = + ((fAngularThreshold * 3.14159265358979323846f64 as f32 / 180.0f32) as f64).cos() as f32; + f = 0; + while f < iNrFaces { + let verts = geometry.num_vertices_of_face(f); + if verts == 3 { + iNrTrianglesIn += 1 + } else if verts == 4 { + iNrTrianglesIn += 2 + } + f += 1 + } + if iNrTrianglesIn <= 0 { + return false; + } + + let mut piTriListIn = vec![0i32; 3 * iNrTrianglesIn]; + let mut pTriInfos = vec![STriInfo::zero(); iNrTrianglesIn]; + + iNrTSPaces = GenerateInitialVerticesIndexList( + &mut pTriInfos, + &mut piTriListIn, + geometry, + iNrTrianglesIn, + ); + GenerateSharedVerticesIndexList(piTriListIn.as_mut_ptr(), geometry, iNrTrianglesIn); + iTotTris = iNrTrianglesIn; + iDegenTriangles = 0; + t = 0; + while t < iTotTris as usize { + let i0 = piTriListIn[t * 3 + 0]; + let i1 = piTriListIn[t * 3 + 1]; + let i2 = piTriListIn[t * 3 + 2]; + let p0 = get_position(geometry, i0 as usize); + let p1 = get_position(geometry, i1 as usize); + let p2 = get_position(geometry, i2 as usize); + if p0 == p1 || p0 == p2 || p1 == p2 { + pTriInfos[t].iFlag |= 1i32; + iDegenTriangles += 1 + } + t += 1 + } + iNrTrianglesIn = iTotTris - iDegenTriangles; + DegenPrologue( + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + iNrTrianglesIn as i32, + iTotTris as i32, + ); + InitTriInfo( + pTriInfos.as_mut_ptr(), + piTriListIn.as_ptr(), + geometry, + iNrTrianglesIn, + ); + iNrMaxGroups = iNrTrianglesIn * 3; + + let mut pGroups = vec![SGroup::zero(); iNrMaxGroups]; + let mut piGroupTrianglesBuffer = vec![0; iNrTrianglesIn * 3]; + + 
iNrActiveGroups = Build4RuleGroups( + pTriInfos.as_mut_ptr(), + pGroups.as_mut_ptr(), + piGroupTrianglesBuffer.as_mut_ptr(), + piTriListIn.as_ptr(), + iNrTrianglesIn as i32, + ); + + let mut psTspace = vec![ + STSpace { + vOs: Vec3::new(1.0, 0.0, 0.0), + fMagS: 1.0, + vOt: Vec3::new(0.0, 1.0, 0.0), + fMagT: 1.0, + ..STSpace::zero() + }; + iNrTSPaces + ]; + + bRes = GenerateTSpaces( + &mut psTspace, + pTriInfos.as_ptr(), + pGroups.as_ptr(), + iNrActiveGroups, + piTriListIn.as_ptr(), + fThresCos, + geometry, + ); + if !bRes { + return false; + } + DegenEpilogue( + psTspace.as_mut_ptr(), + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + geometry, + iNrTrianglesIn as i32, + iTotTris as i32, + ); + index = 0; + f = 0; + while f < iNrFaces { + let verts_0 = geometry.num_vertices_of_face(f); + if !(verts_0 != 3 && verts_0 != 4) { + i = 0; + while i < verts_0 { + let mut pTSpace: *const STSpace = &mut psTspace[index] as *mut STSpace; + let mut tang = Vec3::new((*pTSpace).vOs.x, (*pTSpace).vOs.y, (*pTSpace).vOs.z); + let mut bitang = Vec3::new((*pTSpace).vOt.x, (*pTSpace).vOt.y, (*pTSpace).vOt.z); + geometry.set_tangent( + tang.into(), + bitang.into(), + (*pTSpace).fMagS, + (*pTSpace).fMagT, + (*pTSpace).bOrient, + f, + i, + ); + index += 1; + i += 1 + } + } + f += 1 + } + + return true; +} +unsafe fn DegenEpilogue( + mut psTspace: *mut STSpace, + mut pTriInfos: *mut STriInfo, + mut piTriListIn: *mut i32, + geometry: &mut I, + iNrTrianglesIn: i32, + iTotTris: i32, +) { + let mut t: i32 = 0i32; + let mut i: i32 = 0i32; + t = iNrTrianglesIn; + while t < iTotTris { + let bSkip: bool = if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + true + } else { + false + }; + if !bSkip { + i = 0i32; + while i < 3i32 { + let index1: i32 = *piTriListIn.offset((t * 3i32 + i) as isize); + let mut bNotFound: bool = true; + let mut j: i32 = 0i32; + while bNotFound && j < 3i32 * iNrTrianglesIn { + let index2: i32 = *piTriListIn.offset(j as isize); + if index1 == index2 { + bNotFound = false + } else { + j += 1 + } + } + if !bNotFound { + let iTri: i32 = j / 3i32; + let iVert: i32 = j % 3i32; + let iSrcVert: i32 = + (*pTriInfos.offset(iTri as isize)).vert_num[iVert as usize] as i32; + let iSrcOffs: i32 = (*pTriInfos.offset(iTri as isize)).iTSpacesOffs; + let iDstVert: i32 = (*pTriInfos.offset(t as isize)).vert_num[i as usize] as i32; + let iDstOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iDstOffs + iDstVert) as isize) = + *psTspace.offset((iSrcOffs + iSrcVert) as isize) + } + i += 1 + } + } + t += 1 + } + t = 0i32; + while t < iNrTrianglesIn { + if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + let mut vDstP = Vec3::new(0.0, 0.0, 0.0); + let mut iOrgF: i32 = -1i32; + let mut i_0: i32 = 0i32; + let mut bNotFound_0: bool = false; + let mut pV: *mut u8 = (*pTriInfos.offset(t as isize)).vert_num.as_mut_ptr(); + let mut iFlag: i32 = 1i32 << *pV.offset(0isize) as i32 + | 1i32 << *pV.offset(1isize) as i32 + | 1i32 << *pV.offset(2isize) as i32; + let mut iMissingIndex: i32 = 0i32; + if iFlag & 2i32 == 0i32 { + iMissingIndex = 1i32 + } else if iFlag & 4i32 == 0i32 { + iMissingIndex = 2i32 + } else if iFlag & 8i32 == 0i32 { + iMissingIndex = 3i32 + } + iOrgF = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + vDstP = get_position( + geometry, + face_vert_to_index(iOrgF as usize, iMissingIndex as usize), + ); + bNotFound_0 = true; + i_0 = 0i32; + while bNotFound_0 && i_0 < 3i32 { + let iVert_0: i32 = *pV.offset(i_0 as isize) as i32; + let vSrcP = get_position( + geometry, + 
face_vert_to_index(iOrgF as usize, iVert_0 as usize), + ); + if vSrcP == vDstP { + let iOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iOffs + iMissingIndex) as isize) = + *psTspace.offset((iOffs + iVert_0) as isize); + bNotFound_0 = false + } else { + i_0 += 1 + } + } + } + t += 1 + } +} + +unsafe fn GenerateTSpaces( + psTspace: &mut [STSpace], + mut pTriInfos: *const STriInfo, + mut pGroups: *const SGroup, + iNrActiveGroups: i32, + mut piTriListIn: *const i32, + fThresCos: f32, + geometry: &mut I, +) -> bool { + let mut iMaxNrFaces: usize = 0; + let mut iUniqueTspaces = 0; + let mut g: i32 = 0i32; + let mut i: i32 = 0i32; + g = 0i32; + while g < iNrActiveGroups { + if iMaxNrFaces < (*pGroups.offset(g as isize)).iNrFaces as usize { + iMaxNrFaces = (*pGroups.offset(g as isize)).iNrFaces as usize + } + g += 1 + } + if iMaxNrFaces == 0 { + return true; + } + + let mut pSubGroupTspace = vec![STSpace::zero(); iMaxNrFaces]; + let mut pUniSubGroups = vec![SSubGroup::zero(); iMaxNrFaces]; + let mut pTmpMembers = vec![0i32; iMaxNrFaces]; + + iUniqueTspaces = 0; + g = 0i32; + while g < iNrActiveGroups { + let mut pGroup: *const SGroup = &*pGroups.offset(g as isize) as *const SGroup; + let mut iUniqueSubGroups = 0; + let mut s = 0; + i = 0i32; + while i < (*pGroup).iNrFaces { + let f: i32 = *(*pGroup).pFaceIndices.offset(i as isize); + let mut index: i32 = -1i32; + let mut iVertIndex: i32 = -1i32; + let mut iOF_1: i32 = -1i32; + let mut iMembers: usize = 0; + let mut j: i32 = 0i32; + let mut l: usize = 0; + let mut tmp_group: SSubGroup = SSubGroup { + iNrFaces: 0, + pTriMembers: Vec::new(), + }; + let mut bFound: bool = false; + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + if (*pTriInfos.offset(f as isize)).AssignedGroup[0usize] == pGroup as *mut SGroup { + index = 0i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[1usize] == pGroup as *mut SGroup + { + index = 1i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[2usize] == pGroup as *mut SGroup + { + index = 2i32 + } + iVertIndex = *piTriListIn.offset((f * 3i32 + index) as isize); + n = get_normal(geometry, iVertIndex as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) + } + if VNotZero(vOt) { + vOt = Normalize(vOt) + } + iOF_1 = (*pTriInfos.offset(f as isize)).iOrgFaceNumber; + iMembers = 0; + j = 0i32; + while j < (*pGroup).iNrFaces { + let t: i32 = *(*pGroup).pFaceIndices.offset(j as isize); + let iOF_2: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let mut vOs2 = (*pTriInfos.offset(t as isize)).vOs + - (n.dot((*pTriInfos.offset(t as isize)).vOs) * n); + let mut vOt2 = (*pTriInfos.offset(t as isize)).vOt + - (n.dot((*pTriInfos.offset(t as isize)).vOt) * n); + if VNotZero(vOs2) { + vOs2 = Normalize(vOs2) + } + if VNotZero(vOt2) { + vOt2 = Normalize(vOt2) + } + let bAny: bool = if ((*pTriInfos.offset(f as isize)).iFlag + | (*pTriInfos.offset(t as isize)).iFlag) + & 4i32 + != 0i32 + { + true + } else { + false + }; + let bSameOrgFace: bool = iOF_1 == iOF_2; + let fCosS: f32 = vOs.dot(vOs2); + let fCosT: f32 = vOt.dot(vOt2); + if bAny || bSameOrgFace || fCosS > fThresCos && fCosT > fThresCos { + let fresh0 = iMembers; + iMembers = iMembers + 1; + pTmpMembers[fresh0] = t + } + j += 1 + } + if 
iMembers > 1 { + let mut uSeed: u32 = 39871946i32 as u32; + QuickSort(pTmpMembers.as_mut_ptr(), 0i32, (iMembers - 1) as i32, uSeed); + } + tmp_group.iNrFaces = iMembers as i32; + tmp_group.pTriMembers = pTmpMembers.clone(); + bFound = false; + l = 0; + while l < iUniqueSubGroups && !bFound { + bFound = CompareSubGroups(&mut tmp_group, &mut pUniSubGroups[l]); + if !bFound { + l += 1 + } + } + if !bFound { + pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers as i32; + pUniSubGroups[iUniqueSubGroups].pTriMembers = tmp_group.pTriMembers.clone(); + + pSubGroupTspace[iUniqueSubGroups] = EvalTspace( + tmp_group.pTriMembers.as_mut_ptr(), + iMembers as i32, + piTriListIn, + pTriInfos, + geometry, + (*pGroup).iVertexRepresentitive, + ); + iUniqueSubGroups += 1 + } + let iOffs = (*pTriInfos.offset(f as isize)).iTSpacesOffs as usize; + let iVert = (*pTriInfos.offset(f as isize)).vert_num[index as usize] as usize; + let mut pTS_out: *mut STSpace = &mut psTspace[iOffs + iVert] as *mut STSpace; + if (*pTS_out).iCounter == 1i32 { + *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); + (*pTS_out).iCounter = 2i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } else { + *pTS_out = pSubGroupTspace[l]; + (*pTS_out).iCounter = 1i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } + i += 1 + } + iUniqueTspaces += iUniqueSubGroups; + g += 1 + } + return true; +} +unsafe fn AvgTSpace(mut pTS0: *const STSpace, mut pTS1: *const STSpace) -> STSpace { + let mut ts_res: STSpace = STSpace { + vOs: Vec3::new(0.0, 0.0, 0.0), + fMagS: 0., + vOt: Vec3::new(0.0, 0.0, 0.0), + fMagT: 0., + iCounter: 0, + bOrient: false, + }; + if (*pTS0).fMagS == (*pTS1).fMagS + && (*pTS0).fMagT == (*pTS1).fMagT + && (*pTS0).vOs == (*pTS1).vOs + && (*pTS0).vOt == (*pTS1).vOt + { + ts_res.fMagS = (*pTS0).fMagS; + ts_res.fMagT = (*pTS0).fMagT; + ts_res.vOs = (*pTS0).vOs; + ts_res.vOt = (*pTS0).vOt + } else { + ts_res.fMagS = 0.5f32 * ((*pTS0).fMagS + (*pTS1).fMagS); + ts_res.fMagT = 0.5f32 * ((*pTS0).fMagT + (*pTS1).fMagT); + ts_res.vOs = (*pTS0).vOs + (*pTS1).vOs; + ts_res.vOt = (*pTS0).vOt + (*pTS1).vOt; + if VNotZero(ts_res.vOs) { + ts_res.vOs = Normalize(ts_res.vOs) + } + if VNotZero(ts_res.vOt) { + ts_res.vOt = Normalize(ts_res.vOt) + } + } + return ts_res; +} + +unsafe fn Normalize(v: Vec3) -> Vec3 { + return (1.0 / v.length()) * v; +} + +unsafe fn VNotZero(v: Vec3) -> bool { + NotZero(v.x) || NotZero(v.y) || NotZero(v.z) +} + +unsafe fn NotZero(fX: f32) -> bool { + fX.abs() > 1.17549435e-38f32 +} + +unsafe fn EvalTspace( + mut face_indices: *mut i32, + iFaces: i32, + mut piTriListIn: *const i32, + mut pTriInfos: *const STriInfo, + geometry: &mut I, + iVertexRepresentitive: i32, +) -> STSpace { + let mut res: STSpace = STSpace { + vOs: Vec3::new(0.0, 0.0, 0.0), + fMagS: 0., + vOt: Vec3::new(0.0, 0.0, 0.0), + fMagT: 0., + iCounter: 0, + bOrient: false, + }; + let mut fAngleSum: f32 = 0i32 as f32; + let mut face: i32 = 0i32; + res.vOs.x = 0.0f32; + res.vOs.y = 0.0f32; + res.vOs.z = 0.0f32; + res.vOt.x = 0.0f32; + res.vOt.y = 0.0f32; + res.vOt.z = 0.0f32; + res.fMagS = 0i32 as f32; + res.fMagT = 0i32 as f32; + face = 0i32; + while face < iFaces { + let f: i32 = *face_indices.offset(face as isize); + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 { + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + let mut p0 = Vec3::new(0.0, 0.0, 0.0); + let mut p1 = Vec3::new(0.0, 0.0, 0.0); + let mut p2 = Vec3::new(0.0, 0.0, 0.0); + let mut v1 = 
Vec3::new(0.0, 0.0, 0.0); + let mut v2 = Vec3::new(0.0, 0.0, 0.0); + let mut fCos: f32 = 0.; + let mut fAngle: f32 = 0.; + let mut fMagS: f32 = 0.; + let mut fMagT: f32 = 0.; + let mut i: i32 = -1i32; + let mut index: i32 = -1i32; + let mut i0: i32 = -1i32; + let mut i1: i32 = -1i32; + let mut i2: i32 = -1i32; + if *piTriListIn.offset((3i32 * f + 0i32) as isize) == iVertexRepresentitive { + i = 0i32 + } else if *piTriListIn.offset((3i32 * f + 1i32) as isize) == iVertexRepresentitive { + i = 1i32 + } else if *piTriListIn.offset((3i32 * f + 2i32) as isize) == iVertexRepresentitive { + i = 2i32 + } + index = *piTriListIn.offset((3i32 * f + i) as isize); + n = get_normal(geometry, index as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) + } + if VNotZero(vOt) { + vOt = Normalize(vOt) + } + i2 = *piTriListIn.offset((3i32 * f + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + i1 = *piTriListIn.offset((3i32 * f + i) as isize); + i0 = *piTriListIn.offset((3i32 * f + if i > 0i32 { i - 1i32 } else { 2i32 }) as isize); + p0 = get_position(geometry, i0 as usize); + p1 = get_position(geometry, i1 as usize); + p2 = get_position(geometry, i2 as usize); + v1 = p0 - p1; + v2 = p2 - p1; + let mut v1 = v1 - (n.dot(v1) * n); + if VNotZero(v1) { + v1 = Normalize(v1) + } + let mut v2 = v2 - (n.dot(v2) * n); + if VNotZero(v2) { + v2 = Normalize(v2) + } + let fCos = v1.dot(v2); + + let fCos = if fCos > 1i32 as f32 { + 1i32 as f32 + } else if fCos < -1i32 as f32 { + -1i32 as f32 + } else { + fCos + }; + fAngle = (fCos as f64).acos() as f32; + fMagS = (*pTriInfos.offset(f as isize)).fMagS; + fMagT = (*pTriInfos.offset(f as isize)).fMagT; + res.vOs = res.vOs + (fAngle * vOs); + res.vOt = res.vOt + (fAngle * vOt); + res.fMagS += fAngle * fMagS; + res.fMagT += fAngle * fMagT; + fAngleSum += fAngle + } + face += 1 + } + if VNotZero(res.vOs) { + res.vOs = Normalize(res.vOs) + } + if VNotZero(res.vOt) { + res.vOt = Normalize(res.vOt) + } + if fAngleSum > 0i32 as f32 { + res.fMagS /= fAngleSum; + res.fMagT /= fAngleSum + } + return res; +} + +unsafe fn CompareSubGroups(mut pg1: *const SSubGroup, mut pg2: *const SSubGroup) -> bool { + let mut bStillSame: bool = true; + let mut i = 0; + if (*pg1).iNrFaces != (*pg2).iNrFaces { + return false; + } + while i < (*pg1).iNrFaces as usize && bStillSame { + bStillSame = if (*pg1).pTriMembers[i] == (*pg2).pTriMembers[i] { + true + } else { + false + }; + if bStillSame { + i += 1 + } + } + return bStillSame; +} +unsafe fn QuickSort(mut pSortBuffer: *mut i32, mut iLeft: i32, mut iRight: i32, mut uSeed: u32) { + let mut iL: i32 = 0; + let mut iR: i32 = 0; + let mut n: i32 = 0; + let mut index: i32 = 0; + let mut iMid: i32 = 0; + let mut iTmp: i32 = 0; + + // Random + let mut t: u32 = uSeed & 31i32 as u32; + t = uSeed.rotate_left(t) | uSeed.rotate_right((32i32 as u32).wrapping_sub(t)); + uSeed = uSeed.wrapping_add(t).wrapping_add(3i32 as u32); + // Random end + + iL = iLeft; + iR = iRight; + n = iR - iL + 1i32; + index = uSeed.wrapping_rem(n as u32) as i32; + iMid = *pSortBuffer.offset((index + iL) as isize); + loop { + while *pSortBuffer.offset(iL as isize) < iMid { + iL += 1 + } + while *pSortBuffer.offset(iR as isize) > iMid { + iR -= 1 + } + if iL <= iR { + iTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); 
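+            // finish the three-step element swap, then move both partition cursors inward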
+ *pSortBuffer.offset(iR as isize) = iTmp; + iL += 1; + iR -= 1 + } + if !(iL <= iR) { + break; + } + } + if iLeft < iR { + QuickSort(pSortBuffer, iLeft, iR, uSeed); + } + if iL < iRight { + QuickSort(pSortBuffer, iL, iRight, uSeed); + }; +} +unsafe fn Build4RuleGroups( + mut pTriInfos: *mut STriInfo, + mut pGroups: *mut SGroup, + mut piGroupTrianglesBuffer: *mut i32, + mut piTriListIn: *const i32, + iNrTrianglesIn: i32, +) -> i32 { + let iNrMaxGroups: i32 = iNrTrianglesIn * 3i32; + let mut iNrActiveGroups: i32 = 0i32; + let mut iOffset: i32 = 0i32; + let mut f: i32 = 0i32; + let mut i: i32 = 0i32; + f = 0i32; + while f < iNrTrianglesIn { + i = 0i32; + while i < 3i32 { + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 + && (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize].is_null() + { + let mut bOrPre: bool = false; + let mut neigh_indexL: i32 = 0; + let mut neigh_indexR: i32 = 0; + let vert_index: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let ref mut fresh2 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh2 = &mut *pGroups.offset(iNrActiveGroups as isize) as *mut SGroup; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) + .iVertexRepresentitive = vert_index; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreservering = + (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; + let ref mut fresh3 = + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).pFaceIndices; + *fresh3 = &mut *piGroupTrianglesBuffer.offset(iOffset as isize) as *mut i32; + iNrActiveGroups += 1; + AddTriToGroup((*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], f); + bOrPre = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + neigh_indexL = (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize]; + neigh_indexR = (*pTriInfos.offset(f as isize)).FaceNeighbors + [(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + let bAnswer: bool = AssignRecur( + piTriListIn, + pTriInfos, + neigh_indexL, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2: bool = + if (*pTriInfos.offset(neigh_indexL as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff: bool = if bOrPre != bOrPre2 { true } else { false }; + } + if neigh_indexR >= 0i32 { + let bAnswer_0: bool = AssignRecur( + piTriListIn, + pTriInfos, + neigh_indexR, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2_0: bool = + if (*pTriInfos.offset(neigh_indexR as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff_0: bool = if bOrPre != bOrPre2_0 { true } else { false }; + } + iOffset += (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces + } + i += 1 + } + f += 1 + } + return iNrActiveGroups; +} +// /////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////////// +unsafe fn AssignRecur( + mut piTriListIn: *const i32, + mut psTriInfos: *mut STriInfo, + iMyTriIndex: i32, + mut pGroup: *mut SGroup, +) -> bool { + let mut pMyTriInfo: *mut STriInfo = + &mut *psTriInfos.offset(iMyTriIndex as isize) as *mut STriInfo; + // track down vertex + let iVertRep: i32 = (*pGroup).iVertexRepresentitive; + let mut pVerts: *const i32 = + &*piTriListIn.offset((3i32 * iMyTriIndex + 0i32) as isize) as 
*const i32; + let mut i: i32 = -1i32; + if *pVerts.offset(0isize) == iVertRep { + i = 0i32 + } else if *pVerts.offset(1isize) == iVertRep { + i = 1i32 + } else if *pVerts.offset(2isize) == iVertRep { + i = 2i32 + } + if (*pMyTriInfo).AssignedGroup[i as usize] == pGroup { + return true; + } else { + if !(*pMyTriInfo).AssignedGroup[i as usize].is_null() { + return false; + } + } + if (*pMyTriInfo).iFlag & 4i32 != 0i32 { + if (*pMyTriInfo).AssignedGroup[0usize].is_null() + && (*pMyTriInfo).AssignedGroup[1usize].is_null() + && (*pMyTriInfo).AssignedGroup[2usize].is_null() + { + (*pMyTriInfo).iFlag &= !8i32; + (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { + 8i32 + } else { + 0i32 + } + } + } + let bOrient: bool = if (*pMyTriInfo).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + if bOrient != (*pGroup).bOrientPreservering { + return false; + } + AddTriToGroup(pGroup, iMyTriIndex); + (*pMyTriInfo).AssignedGroup[i as usize] = pGroup; + let neigh_indexL: i32 = (*pMyTriInfo).FaceNeighbors[i as usize]; + let neigh_indexR: i32 = + (*pMyTriInfo).FaceNeighbors[(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup); + } + if neigh_indexR >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup); + } + return true; +} +unsafe fn AddTriToGroup(mut pGroup: *mut SGroup, iTriIndex: i32) { + *(*pGroup).pFaceIndices.offset((*pGroup).iNrFaces as isize) = iTriIndex; + (*pGroup).iNrFaces += 1; +} +unsafe fn InitTriInfo( + mut pTriInfos: *mut STriInfo, + mut piTriListIn: *const i32, + geometry: &mut I, + iNrTrianglesIn: usize, +) { + let mut f = 0; + let mut i = 0; + let mut t = 0; + f = 0; + while f < iNrTrianglesIn { + i = 0i32; + while i < 3i32 { + (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize] = -1i32; + let ref mut fresh4 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh4 = 0 as *mut SGroup; + (*pTriInfos.offset(f as isize)).vOs.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.z = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.z = 0.0f32; + (*pTriInfos.offset(f as isize)).fMagS = 0i32 as f32; + (*pTriInfos.offset(f as isize)).fMagT = 0i32 as f32; + (*pTriInfos.offset(f as isize)).iFlag |= 4i32; + i += 1 + } + f += 1 + } + f = 0; + while f < iNrTrianglesIn { + let v1 = get_position(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); + let v2 = get_position(geometry, *piTriListIn.offset((f * 3 + 1) as isize) as usize); + let v3 = get_position(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); + let t1 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); + let t2 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 1) as isize) as usize); + let t3 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); + let t21x: f32 = t2.x - t1.x; + let t21y: f32 = t2.y - t1.y; + let t31x: f32 = t3.x - t1.x; + let t31y: f32 = t3.y - t1.y; + let d1 = v2 - v1; + let d2 = v3 - v1; + let fSignedAreaSTx2: f32 = t21x * t31y - t21y * t31x; + let mut vOs = (t31y * d1) - (t21y * d2); + let mut vOt = (-t31x * d1) + (t21x * d2); + (*pTriInfos.offset(f as isize)).iFlag |= if fSignedAreaSTx2 > 0i32 as f32 { + 8i32 + } else { + 0i32 + }; + if NotZero(fSignedAreaSTx2) { + let fAbsArea: f32 = fSignedAreaSTx2.abs(); + let fLenOs: f32 = vOs.length(); + let fLenOt: f32 = 
vOt.length(); + let fS: f32 = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 == 0i32 { + -1.0f32 + } else { + 1.0f32 + }; + if NotZero(fLenOs) { + (*pTriInfos.offset(f as isize)).vOs = (fS / fLenOs) * vOs + } + if NotZero(fLenOt) { + (*pTriInfos.offset(f as isize)).vOt = (fS / fLenOt) * vOt + } + (*pTriInfos.offset(f as isize)).fMagS = fLenOs / fAbsArea; + (*pTriInfos.offset(f as isize)).fMagT = fLenOt / fAbsArea; + if NotZero((*pTriInfos.offset(f as isize)).fMagS) + && NotZero((*pTriInfos.offset(f as isize)).fMagT) + { + (*pTriInfos.offset(f as isize)).iFlag &= !4i32 + } + } + f += 1 + } + while t < iNrTrianglesIn - 1 { + let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let iFO_b: i32 = (*pTriInfos.offset((t + 1) as isize)).iOrgFaceNumber; + if iFO_a == iFO_b { + let bIsDeg_a: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + if !(bIsDeg_a || bIsDeg_b) { + let bOrientA: bool = if (*pTriInfos.offset(t as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bOrientB: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + if bOrientA != bOrientB { + let mut bChooseOrientFirstTri: bool = false; + if (*pTriInfos.offset((t + 1) as isize)).iFlag & 4i32 != 0i32 { + bChooseOrientFirstTri = true + } else if CalcTexArea(geometry, &*piTriListIn.offset((t * 3 + 0) as isize)) + >= CalcTexArea(geometry, &*piTriListIn.offset(((t + 1) * 3 + 0) as isize)) + { + bChooseOrientFirstTri = true + } + let t0 = if bChooseOrientFirstTri { t } else { t + 1 }; + let t1_0 = if bChooseOrientFirstTri { t + 1 } else { t }; + (*pTriInfos.offset(t1_0 as isize)).iFlag &= !8i32; + (*pTriInfos.offset(t1_0 as isize)).iFlag |= + (*pTriInfos.offset(t0 as isize)).iFlag & 8i32 + } + } + t += 2 + } else { + t += 1 + } + } + + let mut pEdges = vec![SEdge::zero(); iNrTrianglesIn * 3]; + BuildNeighborsFast( + pTriInfos, + pEdges.as_mut_ptr(), + piTriListIn, + iNrTrianglesIn as i32, + ); +} + +unsafe fn BuildNeighborsFast( + mut pTriInfos: *mut STriInfo, + mut pEdges: *mut SEdge, + mut piTriListIn: *const i32, + iNrTrianglesIn: i32, +) { + // build array of edges + // could replace with a random seed? 
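+    // One SEdge record is written per triangle edge, with the smaller vertex index in i0,
+    // the larger in i1, and the owning triangle in f. The records are then sorted by i0,
+    // sub-sorted by i1 and finally by f, so any edge shared by two triangles becomes
+    // adjacent in the array and can be paired into FaceNeighbors with a single linear scan.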
+ let mut uSeed: u32 = 39871946i32 as u32; + let mut iEntries: i32 = 0i32; + let mut iCurStartIndex: i32 = -1i32; + let mut f: i32 = 0i32; + let mut i: i32 = 0i32; + f = 0i32; + while f < iNrTrianglesIn { + i = 0i32; + while i < 3i32 { + let i0: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let i1: i32 = + *piTriListIn.offset((f * 3i32 + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i0 = if i0 < i1 { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i1 = if !(i0 < i1) { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.f = f; + i += 1 + } + f += 1 + } + QuickSortEdges(pEdges, 0i32, iNrTrianglesIn * 3i32 - 1i32, 0i32, uSeed); + iEntries = iNrTrianglesIn * 3i32; + iCurStartIndex = 0i32; + i = 1i32; + while i < iEntries { + if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 + != (*pEdges.offset(i as isize)).unnamed.i0 + { + let iL: i32 = iCurStartIndex; + let iR: i32 = i - 1i32; + iCurStartIndex = i; + QuickSortEdges(pEdges, iL, iR, 1i32, uSeed); + } + i += 1 + } + iCurStartIndex = 0i32; + i = 1i32; + while i < iEntries { + if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 + != (*pEdges.offset(i as isize)).unnamed.i0 + || (*pEdges.offset(iCurStartIndex as isize)).unnamed.i1 + != (*pEdges.offset(i as isize)).unnamed.i1 + { + let iL_0: i32 = iCurStartIndex; + let iR_0: i32 = i - 1i32; + iCurStartIndex = i; + QuickSortEdges(pEdges, iL_0, iR_0, 2i32, uSeed); + } + i += 1 + } + i = 0i32; + while i < iEntries { + let i0_0: i32 = (*pEdges.offset(i as isize)).unnamed.i0; + let i1_0: i32 = (*pEdges.offset(i as isize)).unnamed.i1; + let f_0: i32 = (*pEdges.offset(i as isize)).unnamed.f; + let mut bUnassigned_A: bool = false; + let mut i0_A: i32 = 0; + let mut i1_A: i32 = 0; + let mut edgenum_A: i32 = 0; + let mut edgenum_B: i32 = 0i32; + GetEdge( + &mut i0_A, + &mut i1_A, + &mut edgenum_A, + &*piTriListIn.offset((f_0 * 3i32) as isize), + i0_0, + i1_0, + ); + bUnassigned_A = + if (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] == -1i32 { + true + } else { + false + }; + if bUnassigned_A { + let mut j: i32 = i + 1i32; + let mut t: i32 = 0; + let mut bNotFound: bool = true; + while j < iEntries + && i0_0 == (*pEdges.offset(j as isize)).unnamed.i0 + && i1_0 == (*pEdges.offset(j as isize)).unnamed.i1 + && bNotFound + { + let mut bUnassigned_B: bool = false; + let mut i0_B: i32 = 0; + let mut i1_B: i32 = 0; + t = (*pEdges.offset(j as isize)).unnamed.f; + GetEdge( + &mut i1_B, + &mut i0_B, + &mut edgenum_B, + &*piTriListIn.offset((t * 3i32) as isize), + (*pEdges.offset(j as isize)).unnamed.i0, + (*pEdges.offset(j as isize)).unnamed.i1, + ); + bUnassigned_B = + if (*pTriInfos.offset(t as isize)).FaceNeighbors[edgenum_B as usize] == -1i32 { + true + } else { + false + }; + if i0_A == i0_B && i1_A == i1_B && bUnassigned_B { + bNotFound = false + } else { + j += 1 + } + } + if !bNotFound { + let mut t_0: i32 = (*pEdges.offset(j as isize)).unnamed.f; + (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] = t_0; + (*pTriInfos.offset(t_0 as isize)).FaceNeighbors[edgenum_B as usize] = f_0 + } + } + i += 1 + } +} +unsafe fn GetEdge( + mut i0_out: *mut i32, + mut i1_out: *mut i32, + mut edgenum_out: *mut i32, + mut indices: *const i32, + i0_in: i32, + i1_in: i32, +) { + *edgenum_out = -1i32; + if *indices.offset(0isize) == i0_in || *indices.offset(0isize) == i1_in { + if *indices.offset(1isize) == i0_in || *indices.offset(1isize) == i1_in { + *edgenum_out.offset(0isize) 
= 0i32; + *i0_out.offset(0isize) = *indices.offset(0isize); + *i1_out.offset(0isize) = *indices.offset(1isize) + } else { + *edgenum_out.offset(0isize) = 2i32; + *i0_out.offset(0isize) = *indices.offset(2isize); + *i1_out.offset(0isize) = *indices.offset(0isize) + } + } else { + *edgenum_out.offset(0isize) = 1i32; + *i0_out.offset(0isize) = *indices.offset(1isize); + *i1_out.offset(0isize) = *indices.offset(2isize) + }; +} +// /////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////// +unsafe fn QuickSortEdges( + mut pSortBuffer: *mut SEdge, + mut iLeft: i32, + mut iRight: i32, + channel: i32, + mut uSeed: u32, +) { + let mut t: u32 = 0; + let mut iL: i32 = 0; + let mut iR: i32 = 0; + let mut n: i32 = 0; + let mut index: i32 = 0; + let mut iMid: i32 = 0; + // early out + let mut sTmp: SEdge = SEdge { + unnamed: unnamed { i0: 0, i1: 0, f: 0 }, + }; + let iElems: i32 = iRight - iLeft + 1i32; + if iElems < 2i32 { + return; + } else { + if iElems == 2i32 { + if (*pSortBuffer.offset(iLeft as isize)).array[channel as usize] + > (*pSortBuffer.offset(iRight as isize)).array[channel as usize] + { + sTmp = *pSortBuffer.offset(iLeft as isize); + *pSortBuffer.offset(iLeft as isize) = *pSortBuffer.offset(iRight as isize); + *pSortBuffer.offset(iRight as isize) = sTmp + } + return; + } + } + + // Random + t = uSeed & 31i32 as u32; + t = uSeed.rotate_left(t) | uSeed.rotate_right((32i32 as u32).wrapping_sub(t)); + uSeed = uSeed.wrapping_add(t).wrapping_add(3i32 as u32); + // Random end + + iL = iLeft; + iR = iRight; + n = iR - iL + 1i32; + index = uSeed.wrapping_rem(n as u32) as i32; + iMid = (*pSortBuffer.offset((index + iL) as isize)).array[channel as usize]; + loop { + while (*pSortBuffer.offset(iL as isize)).array[channel as usize] < iMid { + iL += 1 + } + while (*pSortBuffer.offset(iR as isize)).array[channel as usize] > iMid { + iR -= 1 + } + if iL <= iR { + sTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); + *pSortBuffer.offset(iR as isize) = sTmp; + iL += 1; + iR -= 1 + } + if !(iL <= iR) { + break; + } + } + if iLeft < iR { + QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed); + } + if iL < iRight { + QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed); + }; +} + +// returns the texture area times 2 +unsafe fn CalcTexArea(geometry: &mut I, mut indices: *const i32) -> f32 { + let t1 = get_tex_coord(geometry, *indices.offset(0isize) as usize); + let t2 = get_tex_coord(geometry, *indices.offset(1isize) as usize); + let t3 = get_tex_coord(geometry, *indices.offset(2isize) as usize); + let t21x: f32 = t2.x - t1.x; + let t21y: f32 = t2.y - t1.y; + let t31x: f32 = t3.x - t1.x; + let t31y: f32 = t3.y - t1.y; + let fSignedAreaSTx2: f32 = t21x * t31y - t21y * t31x; + return if fSignedAreaSTx2 < 0i32 as f32 { + -fSignedAreaSTx2 + } else { + fSignedAreaSTx2 + }; +} + +// degen triangles +unsafe fn DegenPrologue( + mut pTriInfos: *mut STriInfo, + mut piTriList_out: *mut i32, + iNrTrianglesIn: i32, + iTotTris: i32, +) { + let mut iNextGoodTriangleSearchIndex: i32 = -1i32; + let mut bStillFindingGoodOnes: bool = false; + // locate quads with only one good triangle + let mut t: i32 = 0i32; + while t < iTotTris - 1i32 { + let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let iFO_b: i32 = (*pTriInfos.offset((t + 1i32) as isize)).iOrgFaceNumber; + if iFO_a == iFO_b { + let bIsDeg_a: bool = if 
(*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1i32) as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + if bIsDeg_a ^ bIsDeg_b != false { + (*pTriInfos.offset(t as isize)).iFlag |= 2i32; + (*pTriInfos.offset((t + 1i32) as isize)).iFlag |= 2i32 + } + t += 2i32 + } else { + t += 1 + } + } + iNextGoodTriangleSearchIndex = 1i32; + t = 0i32; + bStillFindingGoodOnes = true; + while t < iNrTrianglesIn && bStillFindingGoodOnes { + let bIsGood: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 == 0i32 { + true + } else { + false + }; + if bIsGood { + if iNextGoodTriangleSearchIndex < t + 2i32 { + iNextGoodTriangleSearchIndex = t + 2i32 + } + } else { + let mut t0: i32 = 0; + let mut t1: i32 = 0; + let mut bJustADegenerate: bool = true; + while bJustADegenerate && iNextGoodTriangleSearchIndex < iTotTris { + let bIsGood_0: bool = + if (*pTriInfos.offset(iNextGoodTriangleSearchIndex as isize)).iFlag & 1i32 + == 0i32 + { + true + } else { + false + }; + if bIsGood_0 { + bJustADegenerate = false + } else { + iNextGoodTriangleSearchIndex += 1 + } + } + t0 = t; + t1 = iNextGoodTriangleSearchIndex; + iNextGoodTriangleSearchIndex += 1; + if !bJustADegenerate { + let mut i: i32 = 0i32; + i = 0i32; + while i < 3i32 { + let index: i32 = *piTriList_out.offset((t0 * 3i32 + i) as isize); + *piTriList_out.offset((t0 * 3i32 + i) as isize) = + *piTriList_out.offset((t1 * 3i32 + i) as isize); + *piTriList_out.offset((t1 * 3i32 + i) as isize) = index; + i += 1 + } + let tri_info: STriInfo = *pTriInfos.offset(t0 as isize); + *pTriInfos.offset(t0 as isize) = *pTriInfos.offset(t1 as isize); + *pTriInfos.offset(t1 as isize) = tri_info + } else { + bStillFindingGoodOnes = false + } + } + if bStillFindingGoodOnes { + t += 1 + } + } +} +unsafe fn GenerateSharedVerticesIndexList( + mut piTriList_in_and_out: *mut i32, + geometry: &mut I, + iNrTrianglesIn: usize, +) { + let mut i = 0; + let mut iChannel: i32 = 0i32; + let mut k = 0; + let mut e = 0; + let mut iMaxCount = 0; + let mut vMin = get_position(geometry, 0); + let mut vMax = vMin; + let mut vDim = Vec3::new(0.0, 0.0, 0.0); + let mut fMin: f32 = 0.; + let mut fMax: f32 = 0.; + i = 1; + while i < iNrTrianglesIn * 3 { + let index: i32 = *piTriList_in_and_out.offset(i as isize); + let vP = get_position(geometry, index as usize); + if vMin.x > vP.x { + vMin.x = vP.x + } else if vMax.x < vP.x { + vMax.x = vP.x + } + if vMin.y > vP.y { + vMin.y = vP.y + } else if vMax.y < vP.y { + vMax.y = vP.y + } + if vMin.z > vP.z { + vMin.z = vP.z + } else if vMax.z < vP.z { + vMax.z = vP.z + } + i += 1 + } + vDim = vMax - vMin; + iChannel = 0i32; + fMin = vMin.x; + fMax = vMax.x; + if vDim.y > vDim.x && vDim.y > vDim.z { + iChannel = 1i32; + fMin = vMin.y; + fMax = vMax.y + } else if vDim.z > vDim.x { + iChannel = 2i32; + fMin = vMin.z; + fMax = vMax.z + } + + let mut piHashTable = vec![0i32; iNrTrianglesIn * 3]; + let mut piHashOffsets = vec![0i32; g_iCells]; + let mut piHashCount = vec![0i32; g_iCells]; + let mut piHashCount2 = vec![0i32; g_iCells]; + + i = 0; + while i < iNrTrianglesIn * 3 { + let index_0: i32 = *piTriList_in_and_out.offset(i as isize); + let vP_0 = get_position(geometry, index_0 as usize); + let fVal: f32 = if iChannel == 0i32 { + vP_0.x + } else if iChannel == 1i32 { + vP_0.y + } else { + vP_0.z + }; + let iCell = FindGridCell(fMin, fMax, fVal); + piHashCount[iCell] += 1; + i += 1 + } + piHashOffsets[0] = 0i32; + k = 1; + while k < g_iCells { + 
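+        // prefix sum: each cell's offset is the running total of the counts of all earlier cells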
piHashOffsets[k] = piHashOffsets[k - 1] + piHashCount[k - 1]; + k += 1 + } + i = 0; + while i < iNrTrianglesIn * 3 { + let index_1: i32 = *piTriList_in_and_out.offset(i as isize); + let vP_1 = get_position(geometry, index_1 as usize); + let fVal_0: f32 = if iChannel == 0i32 { + vP_1.x + } else if iChannel == 1i32 { + vP_1.y + } else { + vP_1.z + }; + let iCell_0 = FindGridCell(fMin, fMax, fVal_0); + let mut pTable: *mut i32 = 0 as *mut i32; + pTable = &mut piHashTable[piHashOffsets[iCell_0] as usize] as *mut i32; + *pTable.offset(piHashCount2[iCell_0] as isize) = i as i32; + piHashCount2[iCell_0] += 1; + i += 1 + } + k = 0; + while k < g_iCells { + k += 1 + } + iMaxCount = piHashCount[0] as usize; + k = 1; + while k < g_iCells { + if iMaxCount < piHashCount[k] as usize { + iMaxCount = piHashCount[k] as usize + } + k += 1 + } + let mut pTmpVert = vec![STmpVert::zero(); iMaxCount]; + k = 0; + while k < g_iCells { + // extract table of cell k and amount of entries in it + let mut pTable_0 = &mut piHashTable[piHashOffsets[k] as usize] as *mut i32; + let iEntries = piHashCount[k] as usize; + if !(iEntries < 2) { + e = 0; + while e < iEntries { + let mut i_0: i32 = *pTable_0.offset(e as isize); + let vP_2 = get_position( + geometry, + *piTriList_in_and_out.offset(i_0 as isize) as usize, + ); + pTmpVert[e].vert[0usize] = vP_2.x; + pTmpVert[e].vert[1usize] = vP_2.y; + pTmpVert[e].vert[2usize] = vP_2.z; + pTmpVert[e].index = i_0; + e += 1 + } + MergeVertsFast( + piTriList_in_and_out, + pTmpVert.as_mut_ptr(), + geometry, + 0i32, + (iEntries - 1) as i32, + ); + } + k += 1 + } +} + +unsafe fn MergeVertsFast( + mut piTriList_in_and_out: *mut i32, + mut pTmpVert: *mut STmpVert, + geometry: &mut I, + iL_in: i32, + iR_in: i32, +) { + // make bbox + let mut c: i32 = 0i32; + let mut l: i32 = 0i32; + let mut channel: i32 = 0i32; + let mut fvMin: [f32; 3] = [0.; 3]; + let mut fvMax: [f32; 3] = [0.; 3]; + let mut dx: f32 = 0i32 as f32; + let mut dy: f32 = 0i32 as f32; + let mut dz: f32 = 0i32 as f32; + let mut fSep: f32 = 0i32 as f32; + c = 0i32; + while c < 3i32 { + fvMin[c as usize] = (*pTmpVert.offset(iL_in as isize)).vert[c as usize]; + fvMax[c as usize] = fvMin[c as usize]; + c += 1 + } + l = iL_in + 1i32; + while l <= iR_in { + c = 0i32; + while c < 3i32 { + if fvMin[c as usize] > (*pTmpVert.offset(l as isize)).vert[c as usize] { + fvMin[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] + } else if fvMax[c as usize] < (*pTmpVert.offset(l as isize)).vert[c as usize] { + fvMax[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] + } + c += 1 + } + l += 1 + } + dx = fvMax[0usize] - fvMin[0usize]; + dy = fvMax[1usize] - fvMin[1usize]; + dz = fvMax[2usize] - fvMin[2usize]; + channel = 0i32; + if dy > dx && dy > dz { + channel = 1i32 + } else if dz > dx { + channel = 2i32 + } + fSep = 0.5f32 * (fvMax[channel as usize] + fvMin[channel as usize]); + if fSep >= fvMax[channel as usize] || fSep <= fvMin[channel as usize] { + l = iL_in; + while l <= iR_in { + let mut i: i32 = (*pTmpVert.offset(l as isize)).index; + let index: i32 = *piTriList_in_and_out.offset(i as isize); + let vP = get_position(geometry, index as usize); + let vN = get_normal(geometry, index as usize); + let vT = get_tex_coord(geometry, index as usize); + let mut bNotFound: bool = true; + let mut l2: i32 = iL_in; + let mut i2rec: i32 = -1i32; + while l2 < l && bNotFound { + let i2: i32 = (*pTmpVert.offset(l2 as isize)).index; + let index2: i32 = *piTriList_in_and_out.offset(i2 as isize); + let vP2 = get_position(geometry, 
index2 as usize); + let vN2 = get_normal(geometry, index2 as usize); + let vT2 = get_tex_coord(geometry, index2 as usize); + i2rec = i2; + if vP.x == vP2.x + && vP.y == vP2.y + && vP.z == vP2.z + && vN.x == vN2.x + && vN.y == vN2.y + && vN.z == vN2.z + && vT.x == vT2.x + && vT.y == vT2.y + && vT.z == vT2.z + { + bNotFound = false + } else { + l2 += 1 + } + } + if !bNotFound { + *piTriList_in_and_out.offset(i as isize) = + *piTriList_in_and_out.offset(i2rec as isize) + } + l += 1 + } + } else { + let mut iL: i32 = iL_in; + let mut iR: i32 = iR_in; + while iL < iR { + let mut bReadyLeftSwap: bool = false; + let mut bReadyRightSwap: bool = false; + while !bReadyLeftSwap && iL < iR { + bReadyLeftSwap = !((*pTmpVert.offset(iL as isize)).vert[channel as usize] < fSep); + if !bReadyLeftSwap { + iL += 1 + } + } + while !bReadyRightSwap && iL < iR { + bReadyRightSwap = (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; + if !bReadyRightSwap { + iR -= 1 + } + } + if bReadyLeftSwap && bReadyRightSwap { + let sTmp: STmpVert = *pTmpVert.offset(iL as isize); + *pTmpVert.offset(iL as isize) = *pTmpVert.offset(iR as isize); + *pTmpVert.offset(iR as isize) = sTmp; + iL += 1; + iR -= 1 + } + } + if iL == iR { + let bReadyRightSwap_0: bool = + (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; + if bReadyRightSwap_0 { + iL += 1 + } else { + iR -= 1 + } + } + if iL_in < iR { + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL_in, iR); + } + if iL < iR_in { + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL, iR_in); + } + }; +} + +const g_iCells: usize = 2048; + +// it is IMPORTANT that this function is called to evaluate the hash since +// inlining could potentially reorder instructions and generate different +// results for the same effective input value fVal. 
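+// The cell index is a linear remap of fVal from the range [fMin, fMax] onto
+// [0, g_iCells - 1], clamped at both ends.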
+#[inline(never)] +unsafe fn FindGridCell(fMin: f32, fMax: f32, fVal: f32) -> usize { + let fIndex = g_iCells as f32 * ((fVal - fMin) / (fMax - fMin)); + let iIndex = fIndex as isize; + return if iIndex < g_iCells as isize { + if iIndex >= 0 { + iIndex as usize + } else { + 0 + } + } else { + g_iCells - 1 + }; +} + +unsafe fn GenerateInitialVerticesIndexList( + pTriInfos: &mut [STriInfo], + piTriList_out: &mut [i32], + geometry: &mut I, + iNrTrianglesIn: usize, +) -> usize { + let mut iTSpacesOffs: usize = 0; + let mut f = 0; + let mut t: usize = 0; + let mut iDstTriIndex = 0; + f = 0; + while f < geometry.num_faces() { + let verts = geometry.num_vertices_of_face(f); + if !(verts != 3 && verts != 4) { + pTriInfos[iDstTriIndex].iOrgFaceNumber = f as i32; + pTriInfos[iDstTriIndex].iTSpacesOffs = iTSpacesOffs as i32; + if verts == 3 { + let mut pVerts = &mut pTriInfos[iDstTriIndex].vert_num; + pVerts[0] = 0; + pVerts[1] = 1; + pVerts[2] = 2; + piTriList_out[iDstTriIndex * 3 + 0] = face_vert_to_index(f, 0) as i32; + piTriList_out[iDstTriIndex * 3 + 1] = face_vert_to_index(f, 1) as i32; + piTriList_out[iDstTriIndex * 3 + 2] = face_vert_to_index(f, 2) as i32; + iDstTriIndex += 1 + } else { + pTriInfos[iDstTriIndex + 1].iOrgFaceNumber = f as i32; + pTriInfos[iDstTriIndex + 1].iTSpacesOffs = iTSpacesOffs as i32; + let i0 = face_vert_to_index(f, 0); + let i1 = face_vert_to_index(f, 1); + let i2 = face_vert_to_index(f, 2); + let i3 = face_vert_to_index(f, 3); + let T0 = get_tex_coord(geometry, i0); + let T1 = get_tex_coord(geometry, i1); + let T2 = get_tex_coord(geometry, i2); + let T3 = get_tex_coord(geometry, i3); + let distSQ_02: f32 = (T2 - T0).length_squared(); + let distSQ_13: f32 = (T3 - T1).length_squared(); + let mut bQuadDiagIs_02: bool = false; + if distSQ_02 < distSQ_13 { + bQuadDiagIs_02 = true + } else if distSQ_13 < distSQ_02 { + bQuadDiagIs_02 = false + } else { + let P0 = get_position(geometry, i0); + let P1 = get_position(geometry, i1); + let P2 = get_position(geometry, i2); + let P3 = get_position(geometry, i3); + let distSQ_02_0: f32 = (P2 - P0).length_squared(); + let distSQ_13_0: f32 = (P3 - P1).length_squared(); + bQuadDiagIs_02 = if distSQ_13_0 < distSQ_02_0 { + false + } else { + true + } + } + if bQuadDiagIs_02 { + let mut pVerts_A = &mut pTriInfos[iDstTriIndex].vert_num; + pVerts_A[0] = 0; + pVerts_A[1] = 1; + pVerts_A[2] = 2; + piTriList_out[iDstTriIndex * 3 + 0] = i0 as i32; + piTriList_out[iDstTriIndex * 3 + 1] = i1 as i32; + piTriList_out[iDstTriIndex * 3 + 2] = i2 as i32; + iDstTriIndex += 1; + + let mut pVerts_B = &mut pTriInfos[iDstTriIndex].vert_num; + pVerts_B[0] = 0; + pVerts_B[1] = 2; + pVerts_B[2] = 3; + piTriList_out[iDstTriIndex * 3 + 0] = i0 as i32; + piTriList_out[iDstTriIndex * 3 + 1] = i2 as i32; + piTriList_out[iDstTriIndex * 3 + 2] = i3 as i32; + iDstTriIndex += 1 + } else { + let mut pVerts_A_0 = &mut pTriInfos[iDstTriIndex].vert_num; + pVerts_A_0[0] = 0; + pVerts_A_0[1] = 1; + pVerts_A_0[2] = 3; + piTriList_out[iDstTriIndex * 3 + 0] = i0 as i32; + piTriList_out[iDstTriIndex * 3 + 1] = i1 as i32; + piTriList_out[iDstTriIndex * 3 + 2] = i3 as i32; + iDstTriIndex += 1; + + let mut pVerts_B_0 = &mut pTriInfos[iDstTriIndex].vert_num; + pVerts_B_0[0] = 1; + pVerts_B_0[1] = 2; + pVerts_B_0[2] = 3; + piTriList_out[iDstTriIndex * 3 + 0] = i1 as i32; + piTriList_out[iDstTriIndex * 3 + 1] = i2 as i32; + piTriList_out[iDstTriIndex * 3 + 2] = i3 as i32; + iDstTriIndex += 1 + } + } + iTSpacesOffs += verts + } + f += 1 + } + t = 0; + while t < iNrTrianglesIn { + 
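+        // start every generated triangle with a clean flag word; later passes OR their own
+        // markers (orientation, grouping, degeneracy) into it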
pTriInfos[t].iFlag = 0; + t += 1 + } + return iTSpacesOffs; +} diff --git a/crates/bevy_mikktspace/src/lib.rs b/crates/bevy_mikktspace/src/lib.rs new file mode 100644 index 0000000000000..89d7b05427407 --- /dev/null +++ b/crates/bevy_mikktspace/src/lib.rs @@ -0,0 +1,85 @@ +#![allow(clippy::all)] + +use glam::{Vec2, Vec3}; + +mod generated; + +/// The interface by which mikktspace interacts with your geometry. +pub trait Geometry { + /// Returns the number of faces. + fn num_faces(&self) -> usize; + + /// Returns the number of vertices of a face. + fn num_vertices_of_face(&self, face: usize) -> usize; + + /// Returns the position of a vertex. + fn position(&self, face: usize, vert: usize) -> [f32; 3]; + + /// Returns the normal of a vertex. + fn normal(&self, face: usize, vert: usize) -> [f32; 3]; + + /// Returns the texture coordinate of a vertex. + fn tex_coord(&self, face: usize, vert: usize) -> [f32; 2]; + + /// Sets the generated tangent for a vertex. + /// Leave this function unimplemented if you are implementing + /// `set_tangent_encoded`. + fn set_tangent( + &mut self, + tangent: [f32; 3], + _bi_tangent: [f32; 3], + _f_mag_s: f32, + _f_mag_t: f32, + bi_tangent_preserves_orientation: bool, + face: usize, + vert: usize, + ) { + let sign = if bi_tangent_preserves_orientation { + 1.0 + } else { + -1.0 + }; + self.set_tangent_encoded([tangent[0], tangent[1], tangent[2], sign], face, vert); + } + + /// Sets the generated tangent for a vertex with its bi-tangent encoded as the 'W' (4th) + /// component in the tangent. The 'W' component marks if the bi-tangent is flipped. This + /// is called by the default implementation of `set_tangent`; therefore, this function will + /// not be called by the crate unless `set_tangent` is unimplemented. + fn set_tangent_encoded(&mut self, _tangent: [f32; 4], _face: usize, _vert: usize) {} +} + +/// Generates tangents for the input geometry. +/// +/// # Errors +/// +/// Returns `false` if the geometry is unsuitable for tangent generation including, +/// but not limited to, lack of vertices. 
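+///
+/// # Example
+///
+/// A minimal sketch (illustrative only; `OneTri` is not part of this crate) of
+/// implementing [`Geometry`] for a single triangle and generating its tangents.
+///
+/// ```no_run
+/// use bevy_mikktspace::{generate_tangents, Geometry};
+///
+/// // One right triangle in the XY plane, facing +Z.
+/// struct OneTri;
+///
+/// impl Geometry for OneTri {
+///     fn num_faces(&self) -> usize { 1 }
+///     fn num_vertices_of_face(&self, _face: usize) -> usize { 3 }
+///     fn position(&self, _face: usize, vert: usize) -> [f32; 3] {
+///         let p: [[f32; 3]; 3] = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]];
+///         p[vert]
+///     }
+///     fn normal(&self, _face: usize, _vert: usize) -> [f32; 3] {
+///         [0.0, 0.0, 1.0]
+///     }
+///     fn tex_coord(&self, _face: usize, vert: usize) -> [f32; 2] {
+///         let uv: [[f32; 2]; 3] = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]];
+///         uv[vert]
+///     }
+///     // The default `set_tangent` forwards here with the sign in the 4th component.
+///     fn set_tangent_encoded(&mut self, tangent: [f32; 4], face: usize, vert: usize) {
+///         println!("face {} vert {}: {:?}", face, vert, tangent);
+///     }
+/// }
+///
+/// assert!(generate_tangents(&mut OneTri));
+/// ```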
+pub fn generate_tangents(geometry: &mut I) -> bool { + unsafe { generated::genTangSpace(geometry, 180.0) } +} + +fn get_position(geometry: &mut I, index: usize) -> Vec3 { + let (face, vert) = index_to_face_vert(index); + geometry.position(face, vert).into() +} + +fn get_tex_coord(geometry: &mut I, index: usize) -> Vec3 { + let (face, vert) = index_to_face_vert(index); + let tex_coord: Vec2 = geometry.tex_coord(face, vert).into(); + let val = tex_coord.extend(1.0); + val +} + +fn get_normal(geometry: &mut I, index: usize) -> Vec3 { + let (face, vert) = index_to_face_vert(index); + geometry.normal(face, vert).into() +} + +fn index_to_face_vert(index: usize) -> (usize, usize) { + (index >> 2, index & 0x3) +} + +fn face_vert_to_index(face: usize, vert: usize) -> usize { + face << 2 | vert & 0x3 +} diff --git a/crates/bevy_mikktspace/tests/regression_test.rs b/crates/bevy_mikktspace/tests/regression_test.rs new file mode 100644 index 0000000000000..42177cbc3496d --- /dev/null +++ b/crates/bevy_mikktspace/tests/regression_test.rs @@ -0,0 +1,889 @@ +#![allow( + clippy::bool_assert_comparison, + clippy::useless_conversion, + clippy::redundant_else, + clippy::match_same_arms, + clippy::semicolon_if_nothing_returned, + clippy::explicit_iter_loop, + clippy::map_flatten +)] + +use bevy_mikktspace::{generate_tangents, Geometry}; +use glam::{Vec2, Vec3}; + +pub type Face = [u32; 3]; + +#[derive(Debug)] +struct Vertex { + position: Vec3, + normal: Vec3, + tex_coord: Vec2, +} + +#[derive(Debug, PartialEq)] +struct Result { + tangent: [f32; 3], + bi_tangent: [f32; 3], + mag_s: f32, + mag_t: f32, + bi_tangent_preserves_orientation: bool, + face: usize, + vert: usize, +} + +impl Result { + fn new( + tangent: [f32; 3], + bi_tangent: [f32; 3], + mag_s: f32, + mag_t: f32, + bi_tangent_preserves_orientation: bool, + face: usize, + vert: usize, + ) -> Self { + Self { + tangent, + bi_tangent, + mag_s, + mag_t, + bi_tangent_preserves_orientation, + face, + vert, + } + } +} + +struct Mesh { + faces: Vec, + vertices: Vec, +} + +struct Context { + mesh: Mesh, + results: Vec, +} + +fn vertex(mesh: &Mesh, face: usize, vert: usize) -> &Vertex { + let vs: &[u32; 3] = &mesh.faces[face]; + &mesh.vertices[vs[vert] as usize] +} + +impl Geometry for Context { + fn num_faces(&self) -> usize { + self.mesh.faces.len() + } + + fn num_vertices_of_face(&self, _face: usize) -> usize { + 3 + } + + fn position(&self, face: usize, vert: usize) -> [f32; 3] { + vertex(&self.mesh, face, vert).position.into() + } + + fn normal(&self, face: usize, vert: usize) -> [f32; 3] { + vertex(&self.mesh, face, vert).normal.into() + } + + fn tex_coord(&self, face: usize, vert: usize) -> [f32; 2] { + vertex(&self.mesh, face, vert).tex_coord.into() + } + + fn set_tangent( + &mut self, + tangent: [f32; 3], + bi_tangent: [f32; 3], + mag_s: f32, + mag_t: f32, + bi_tangent_preserves_orientation: bool, + face: usize, + vert: usize, + ) { + self.results.push(Result { + tangent, + bi_tangent, + mag_s, + mag_t, + bi_tangent_preserves_orientation, + face, + vert, + }) + } +} + +struct ControlPoint { + uv: [f32; 2], + dir: [f32; 3], +} + +impl ControlPoint { + fn new(uv: [f32; 2], dir: [f32; 3]) -> Self { + Self { uv, dir } + } +} + +fn make_cube() -> Mesh { + let mut faces = Vec::new(); + let mut ctl_pts = Vec::new(); + let mut vertices = Vec::new(); + + // +x plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, 
base, base + 4]); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([1.0, 1.0], [1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([1.0, 0.0], [1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.5, 0.5], [1.0, 0.0, 0.0])); + } + + // -x plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint::new([1.0, 0.0], [-1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([1.0, 1.0], [-1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [-1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [-1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.5, 0.5], [-1.0, 0.0, 0.0])); + } + + // +y plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [-1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [-1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.5], [0.0, 1.0, 0.0])); + } + + // -y plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [-1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [-1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.5], [0.0, -1.0, 0.0])); + } + + // +z plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [-1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [-1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([1.0, 1.0], [1.0, -1.0, 1.0])); + ctl_pts.push(ControlPoint::new([1.0, 0.0], [1.0, 1.0, 1.0])); + ctl_pts.push(ControlPoint::new([0.5, 0.5], [0.0, 0.0, 1.0])); + } + + // -z plane + { + let base = ctl_pts.len() as u32; + faces.push([base, base + 1, base + 4]); + faces.push([base + 1, base + 2, base + 4]); + faces.push([base + 2, base + 3, base + 4]); + faces.push([base + 3, base, base + 4]); + ctl_pts.push(ControlPoint::new([1.0, 0.0], [1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([1.0, 1.0], [1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 1.0], [-1.0, -1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.0, 0.0], [-1.0, 1.0, -1.0])); + ctl_pts.push(ControlPoint::new([0.5, 0.5], [0.0, 0.0, -1.0])); + } + + for pt in ctl_pts { + let p: Vec3 = pt.dir.into(); + let n: Vec3 = p.normalize(); + let t: Vec2 = pt.uv.into(); + vertices.push(Vertex { + position: (p / 2.0).into(), + normal: n.into(), + tex_coord: t.into(), + }); + } + + Mesh { faces, vertices } +} + +#[test] +fn cube_tangents_should_equal_reference_values() { + let mut context = Context { + mesh: make_cube(), + results: Vec::new(), + }; 
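+    // Run tangent generation over the cube and record every `set_tangent` callback,
+    // then compare against the hard-coded reference values below.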
+ let ret = generate_tangents(&mut context); + assert_eq!(true, ret); + + let expected_results: Vec = vec![ + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 0, + 0, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 0, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + false, + 0, + 2, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 1, + 0, + ), + Result::new( + [-0.40824825, 0.81649655, 0.40824825], + [-0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 1, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + false, + 1, + 2, + ), + Result::new( + [-0.40824825, 0.81649655, 0.40824825], + [-0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 2, + 0, + ), + Result::new( + [-0.40824825, 0.81649655, -0.40824825], + [0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 2, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + false, + 2, + 2, + ), + Result::new( + [-0.40824825, 0.81649655, -0.40824825], + [0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 3, + 0, + ), + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 3, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + false, + 3, + 2, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 4, + 0, + ), + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 4, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + true, + 4, + 2, + ), + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 5, + 0, + ), + Result::new( + [-0.40824825, 0.81649655, -0.40824825], + [0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 5, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + true, + 5, + 2, + ), + Result::new( + [-0.40824825, 0.81649655, -0.40824825], + [0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 6, + 0, + ), + Result::new( + [-0.40824825, 0.81649655, 0.40824825], + [-0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 6, + 1, + ), + Result::new( + [0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + true, + 6, + 2, + ), + Result::new( + [-0.40824825, 0.81649655, 0.40824825], + [-0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 7, + 0, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 7, + 1, + ), + Result::new( + 
[0.00000000, 1.00000000, 0.00000000], + [0.00000000, 0.00000000, -1.00000000], + 1.00000000, + 1.00000000, + true, + 7, + 2, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 8, + 0, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 8, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 8, + 2, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 9, + 0, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 9, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 9, + 2, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 10, + 0, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 10, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 10, + 2, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 11, + 0, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 11, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 11, + 2, + ), + Result::new( + [-0.40824825, 0.81649655, 0.40824825], + [-0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 12, + 0, + ), + Result::new( + [-0.40824825, 0.81649655, -0.40824825], + [0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + true, + 12, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 12, + 2, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 13, + 0, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 13, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 13, + 2, + ), + Result::new( + [0.40824825, 0.81649655, -0.40824825], + [-0.40824825, 0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 14, + 0, + ), + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 14, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 14, + 2, + ), + Result::new( + [0.40824825, 0.81649655, 0.40824825], + [0.40824825, -0.40824825, -0.81649655], + 1.00000000, + 1.00000000, + false, + 15, + 0, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 15, + 1, + ), + 
Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, 1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 15, + 2, + ), + Result::new( + [0.81649655, 0.40824825, 0.40824825], + [-0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + false, + 16, + 0, + ), + Result::new( + [0.81649655, -0.40824825, 0.40824825], + [0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + false, + 16, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 16, + 2, + ), + Result::new( + [0.81649655, -0.40824825, 0.40824825], + [0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + false, + 17, + 0, + ), + Result::new( + [0.81649655, 0.40824825, -0.40824825], + [-0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + false, + 17, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 17, + 2, + ), + Result::new( + [0.81649655, 0.40824825, -0.40824825], + [-0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + false, + 18, + 0, + ), + Result::new( + [0.81649655, -0.40824825, -0.40824825], + [0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + false, + 18, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 18, + 2, + ), + Result::new( + [0.81649655, -0.40824825, -0.40824825], + [0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + false, + 19, + 0, + ), + Result::new( + [0.81649655, 0.40824825, 0.40824825], + [-0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + false, + 19, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + false, + 19, + 2, + ), + Result::new( + [0.81649655, -0.40824825, 0.40824825], + [0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + true, + 20, + 0, + ), + Result::new( + [0.81649655, 0.40824825, 0.40824825], + [-0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + true, + 20, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + true, + 20, + 2, + ), + Result::new( + [0.81649655, 0.40824825, 0.40824825], + [-0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + true, + 21, + 0, + ), + Result::new( + [0.81649655, -0.40824825, -0.40824825], + [0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + true, + 21, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + true, + 21, + 2, + ), + Result::new( + [0.81649655, -0.40824825, -0.40824825], + [0.40824825, -0.81649655, 0.40824825], + 1.00000000, + 1.00000000, + true, + 22, + 0, + ), + Result::new( + [0.81649655, 0.40824825, -0.40824825], + [-0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + true, + 22, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + true, + 22, + 2, + ), + Result::new( + [0.81649655, 0.40824825, -0.40824825], + [-0.40824825, -0.81649655, -0.40824825], + 1.00000000, + 1.00000000, + true, + 23, + 0, + ), + Result::new( + [0.81649655, -0.40824825, 0.40824825], + [0.40824825, -0.81649655, -0.40824825], + 
1.00000000, + 1.00000000, + true, + 23, + 1, + ), + Result::new( + [1.00000000, 0.00000000, 0.00000000], + [0.00000000, -1.00000000, 0.00000000], + 1.00000000, + 1.00000000, + true, + 23, + 2, + ), + ]; + + assert_eq!(expected_results, context.results); +} diff --git a/crates/bevy_pbr/src/lib.rs b/crates/bevy_pbr/src/lib.rs index 51f721da4c240..503edb6b60d0b 100644 --- a/crates/bevy_pbr/src/lib.rs +++ b/crates/bevy_pbr/src/lib.rs @@ -42,6 +42,8 @@ use bevy_asset::{load_internal_asset, Assets, Handle, HandleUntyped}; use bevy_ecs::prelude::*; use bevy_reflect::TypeUuid; use bevy_render::{ + camera::CameraUpdateSystem, + extract_resource::ExtractResourcePlugin, prelude::Color, render_graph::RenderGraph, render_phase::{sort_phase_system, AddRenderCommand, DrawFunctions}, @@ -51,8 +53,22 @@ use bevy_render::{ }; use bevy_transform::TransformSystem; +pub const PBR_TYPES_SHADER_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1708015359337029744); +pub const PBR_BINDINGS_SHADER_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 5635987986427308186); +pub const UTILS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1900548483293416725); +pub const CLUSTERED_FORWARD_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 166852093121196815); +pub const PBR_LIGHTING_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 14170772752254856967); +pub const SHADOWS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 11350275143789590502); pub const PBR_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 4805239651767701046); +pub const PBR_FUNCTIONS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 16550102964439850292); pub const SHADOW_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1836745567947005696); @@ -62,6 +78,43 @@ pub struct PbrPlugin; impl Plugin for PbrPlugin { fn build(&self, app: &mut App) { + load_internal_asset!( + app, + PBR_TYPES_SHADER_HANDLE, + "render/pbr_types.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + PBR_BINDINGS_SHADER_HANDLE, + "render/pbr_bindings.wgsl", + Shader::from_wgsl + ); + load_internal_asset!(app, UTILS_HANDLE, "render/utils.wgsl", Shader::from_wgsl); + load_internal_asset!( + app, + CLUSTERED_FORWARD_HANDLE, + "render/clustered_forward.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + PBR_LIGHTING_HANDLE, + "render/pbr_lighting.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + SHADOWS_HANDLE, + "render/shadows.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + PBR_FUNCTIONS_HANDLE, + "render/pbr_functions.wgsl", + Shader::from_wgsl + ); load_internal_asset!(app, PBR_SHADER_HANDLE, "render/pbr.wgsl", Shader::from_wgsl); load_internal_asset!( app, @@ -79,6 +132,7 @@ impl Plugin for PbrPlugin { .init_resource::() .init_resource::() .init_resource::() + .add_plugin(ExtractResourcePlugin::::default()) .add_system_to_stage( CoreStage::PostUpdate, // NOTE: Clusters need to have been added before update_clusters is run so @@ -92,6 +146,7 @@ impl Plugin for PbrPlugin { assign_lights_to_clusters .label(SimulationLightSystems::AssignLightsToClusters) .after(TransformSystem::TransformPropagate) + .after(CameraUpdateSystem) .after(ModifiesWindows), ) .add_system_to_stage( @@ -183,19 +238,19 @@ impl Plugin for PbrPlugin { render_app.add_render_command::(); let mut graph = 
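The hunk above registers each of the new WGSL modules (pbr_types, pbr_bindings, utils, clustered_forward, pbr_lighting, shadows, pbr_functions) under a hard-coded weak handle so other shaders can `#import` them. Below is a minimal sketch of that same pattern for a hypothetical user plugin; the handle value, plugin name, and `my_module.wgsl` file are placeholders, and `load_internal_asset!` expects the WGSL file to sit next to the Rust source that invokes it.

```rust
use bevy_app::{App, Plugin};
use bevy_asset::{load_internal_asset, HandleUntyped};
use bevy_reflect::TypeUuid;
use bevy_render::render_resource::Shader;

// Placeholder weak handle; any u64 works as long as it never collides.
pub const MY_MODULE_SHADER_HANDLE: HandleUntyped =
    HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1234567890123456789);

pub struct MyModulePlugin;

impl Plugin for MyModulePlugin {
    fn build(&self, app: &mut App) {
        // Embeds my_module.wgsl in the binary and makes it importable from
        // other shaders via its `#define_import_path` name.
        load_internal_asset!(app, MY_MODULE_SHADER_HANDLE, "my_module.wgsl", Shader::from_wgsl);
    }
}
```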
render_app.world.resource_mut::(); let draw_3d_graph = graph - .get_sub_graph_mut(bevy_core_pipeline::draw_3d_graph::NAME) + .get_sub_graph_mut(bevy_core_pipeline::core_3d::graph::NAME) .unwrap(); draw_3d_graph.add_node(draw_3d_graph::node::SHADOW_PASS, shadow_pass_node); draw_3d_graph .add_node_edge( draw_3d_graph::node::SHADOW_PASS, - bevy_core_pipeline::draw_3d_graph::node::MAIN_PASS, + bevy_core_pipeline::core_3d::graph::node::MAIN_PASS, ) .unwrap(); draw_3d_graph .add_slot_edge( draw_3d_graph.input_node().unwrap().id, - bevy_core_pipeline::draw_3d_graph::input::VIEW_ENTITY, + bevy_core_pipeline::core_3d::graph::input::VIEW_ENTITY, draw_3d_graph::node::SHADOW_PASS, ShadowPassNode::IN_VIEW, ) diff --git a/crates/bevy_pbr/src/light.rs b/crates/bevy_pbr/src/light.rs index 22e3fb22cae0e..c8d1ecedd0345 100644 --- a/crates/bevy_pbr/src/light.rs +++ b/crates/bevy_pbr/src/light.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; -use bevy_asset::Assets; use bevy_ecs::prelude::*; use bevy_math::{ const_vec2, Mat4, Quat, UVec2, UVec3, Vec2, Vec3, Vec3A, Vec3Swizzles, Vec4, Vec4Swizzles, @@ -9,7 +8,7 @@ use bevy_reflect::prelude::*; use bevy_render::{ camera::{Camera, CameraProjection, OrthographicProjection}, color::Color, - prelude::Image, + extract_resource::ExtractResource, primitives::{Aabb, CubemapFrusta, Frustum, Plane, Sphere}, render_resource::BufferBindingType, renderer::RenderDevice, @@ -17,7 +16,6 @@ use bevy_render::{ }; use bevy_transform::components::GlobalTransform; use bevy_utils::tracing::warn; -use bevy_window::Windows; use crate::{ calculate_cluster_factors, spotlight_projection_matrix, spotlight_view_matrix, CubeMapFace, @@ -192,7 +190,7 @@ impl Default for DirectionalLightShadowMap { } /// An ambient light, which lights the entire scene equally. -#[derive(Debug)] +#[derive(Clone, Debug, ExtractResource)] pub struct AmbientLight { pub color: Color, /// A direct scale factor multiplied with `color` before being passed to the shader. @@ -348,8 +346,7 @@ impl ClusterConfig { fn first_slice_depth(&self) -> f32 { match self { - ClusterConfig::None => 0.0, - ClusterConfig::Single => 0.0, + ClusterConfig::None | ClusterConfig::Single => 0.0, ClusterConfig::XYZ { z_config, .. } | ClusterConfig::FixedZ { z_config, .. 
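Deriving `ExtractResource` for `AmbientLight` here, together with the `ExtractResourcePlugin` registered in `PbrPlugin::build` above, replaces the hand-written `ExtractedAmbientLight` copy that is deleted further down in `render/light.rs`. A hedged sketch of the same pattern for a hypothetical user resource, using the crate versions this patch targets:

```rust
use bevy_app::{App, Plugin};
use bevy_render::extract_resource::{ExtractResource, ExtractResourcePlugin};

// Illustrative resource, not part of the patch; Clone is required so the
// value can be copied into the render world each frame.
#[derive(Clone, ExtractResource)]
pub struct ExampleSettings {
    pub strength: f32,
}

pub struct ExampleSettingsPlugin;

impl Plugin for ExampleSettingsPlugin {
    fn build(&self, app: &mut App) {
        app.insert_resource(ExampleSettings { strength: 1.0 })
            // Mirrors ExampleSettings into the render world automatically.
            .add_plugin(ExtractResourcePlugin::<ExampleSettings>::default());
    }
}
```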
} => { z_config.first_slice_depth } @@ -754,8 +751,6 @@ impl GlobalVisiblePointLights { pub(crate) fn assign_lights_to_clusters( mut commands: Commands, mut global_lights: ResMut, - windows: Res, - images: Res>, mut views: Query<( Entity, &GlobalTransform, @@ -877,19 +872,18 @@ pub(crate) fn assign_lights_to_clusters( continue; } - let screen_size = - if let Some(screen_size) = camera.target.get_physical_size(&windows, &images) { - screen_size - } else { - clusters.clear(); - continue; - }; + let screen_size = if let Some(screen_size) = camera.physical_viewport_size() { + screen_size + } else { + clusters.clear(); + continue; + }; let mut requested_cluster_dimensions = config.dimensions_for_screen_size(screen_size); let view_transform = camera_transform.compute_matrix(); let inverse_view_transform = view_transform.inverse(); - let is_orthographic = camera.projection_matrix.w_axis.w == 1.0; + let is_orthographic = camera.projection_matrix().w_axis.w == 1.0; let far_z = match config.far_z_mode() { ClusterFarZMode::MaxLightRange => { @@ -914,7 +908,7 @@ pub(crate) fn assign_lights_to_clusters( // 3,2 = r * far and 2,2 = r where r = 1.0 / (far - near) // rearranging r = 1.0 / (far - near), r * (far - near) = 1.0, r * far - 1.0 = r * near, near = (r * far - 1.0) / r // = (3,2 - 1.0) / 2,2 - (camera.projection_matrix.w_axis.z - 1.0) / camera.projection_matrix.z_axis.z + (camera.projection_matrix().w_axis.z - 1.0) / camera.projection_matrix().z_axis.z } (false, 1) => config.first_slice_depth().max(far_z), _ => config.first_slice_depth(), @@ -946,7 +940,7 @@ pub(crate) fn assign_lights_to_clusters( // it can overestimate more significantly when light ranges are only partially in view let (light_aabb_min, light_aabb_max) = cluster_space_light_aabb( inverse_view_transform, - camera.projection_matrix, + camera.projection_matrix(), &light_sphere, ); @@ -1013,9 +1007,9 @@ pub(crate) fn assign_lights_to_clusters( clusters.dimensions.x * clusters.dimensions.y * clusters.dimensions.z <= 4096 ); - let inverse_projection = camera.projection_matrix.inverse(); + let inverse_projection = camera.projection_matrix().inverse(); - for lights in clusters.lights.iter_mut() { + for lights in &mut clusters.lights { lights.entities.clear(); lights.point_light_count = 0; lights.spotlight_count = 0; @@ -1107,7 +1101,7 @@ pub(crate) fn assign_lights_to_clusters( let (light_aabb_xy_ndc_z_view_min, light_aabb_xy_ndc_z_view_max) = cluster_space_light_aabb( inverse_view_transform, - camera.projection_matrix, + camera.projection_matrix(), &light_sphere, ); @@ -1149,7 +1143,7 @@ pub(crate) fn assign_lights_to_clusters( ) }); let light_center_clip = - camera.projection_matrix * view_light_sphere.center.extend(1.0); + camera.projection_matrix() * view_light_sphere.center.extend(1.0); let light_center_ndc = light_center_clip.xyz() / light_center_clip.w; let cluster_coordinates = ndc_position_to_cluster( clusters.dimensions, diff --git a/crates/bevy_pbr/src/material.rs b/crates/bevy_pbr/src/material.rs index b0d20dde6e25a..28620aaec1d03 100644 --- a/crates/bevy_pbr/src/material.rs +++ b/crates/bevy_pbr/src/material.rs @@ -4,7 +4,7 @@ use crate::{ }; use bevy_app::{App, Plugin}; use bevy_asset::{AddAsset, Asset, AssetServer, Handle}; -use bevy_core_pipeline::{AlphaMask3d, Opaque3d, Transparent3d}; +use bevy_core_pipeline::core_3d::{AlphaMask3d, Opaque3d, Transparent3d}; use bevy_ecs::{ entity::Entity, prelude::World, @@ -15,9 +15,9 @@ use bevy_ecs::{ world::FromWorld, }; use bevy_render::{ + 
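`assign_lights_to_clusters` above now reads the projection through `camera.projection_matrix()` and detects an orthographic projection by checking `w_axis.w == 1.0`. The check works because an orthographic matrix keeps 1.0 in its bottom-right cell while a perspective matrix stores 0.0 there. A small self-contained illustration using `bevy_math`'s re-exported glam types (the specific constructors are chosen for illustration, not taken from the patch):

```rust
use bevy_math::Mat4;

fn is_orthographic(projection: &Mat4) -> bool {
    // Orthographic projections have 1.0 in the bottom-right cell,
    // perspective projections have 0.0 there.
    projection.w_axis.w == 1.0
}

fn main() {
    let ortho = Mat4::orthographic_rh(-1.0, 1.0, -1.0, 1.0, 0.1, 100.0);
    let persp = Mat4::perspective_rh(std::f32::consts::FRAC_PI_4, 16.0 / 9.0, 0.1, 100.0);
    assert!(is_orthographic(&ortho));
    assert!(!is_orthographic(&persp));
}
```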
extract_component::ExtractComponentPlugin, mesh::{Mesh, MeshVertexBufferLayout}, render_asset::{RenderAsset, RenderAssetPlugin, RenderAssets}, - render_component::ExtractComponentPlugin, render_phase::{ AddRenderCommand, DrawFunctions, EntityRenderCommand, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, @@ -74,6 +74,14 @@ pub trait Material: Asset + RenderAsset + Sized { &[] } + #[allow(unused_variables)] + #[inline] + /// Add a bias to the view depth of the mesh which can be used to force a specific render order + /// for meshes with equal depth, to avoid z-fighting. + fn depth_bias(material: &::PreparedAsset) -> f32 { + 0.0 + } + /// Customizes the default [`RenderPipelineDescriptor`]. #[allow(unused_variables)] #[inline] @@ -127,11 +135,15 @@ impl SpecializedMaterial for M { ::fragment_shader(asset_server) } - #[allow(unused_variables)] #[inline] fn dynamic_uniform_indices(material: &::PreparedAsset) -> &[u32] { ::dynamic_uniform_indices(material) } + + #[inline] + fn depth_bias(material: &::PreparedAsset) -> f32 { + ::depth_bias(material) + } } /// Materials are used alongside [`MaterialPlugin`] and [`MaterialMeshBundle`](crate::MaterialMeshBundle) @@ -189,6 +201,14 @@ pub trait SpecializedMaterial: Asset + RenderAsset + Sized { fn dynamic_uniform_indices(material: &::PreparedAsset) -> &[u32] { &[] } + + #[allow(unused_variables)] + #[inline] + /// Add a bias to the view depth of the mesh which can be used to force a specific render order + /// for meshes with equal depth, to avoid z-fighting. + fn depth_bias(material: &::PreparedAsset) -> f32 { + 0.0 + } } /// Adds the necessary ECS resources and render logic to enable rendering entities using the given [`SpecializedMaterial`] @@ -378,7 +398,8 @@ pub fn queue_material_meshes( // NOTE: row 2 of the inverse view matrix dotted with column 3 of the model matrix // gives the z component of translation of the mesh in view space - let mesh_z = inverse_view_row_2.dot(mesh_uniform.transform.col(3)); + let bias = M::depth_bias(material); + let mesh_z = inverse_view_row_2.dot(mesh_uniform.transform.col(3)) + bias; match alpha_mode { AlphaMode::Opaque => { opaque_phase.add(Opaque3d { diff --git a/crates/bevy_pbr/src/pbr_material.rs b/crates/bevy_pbr/src/pbr_material.rs index 54a74a517d7c1..3c91e7f9ada55 100644 --- a/crates/bevy_pbr/src/pbr_material.rs +++ b/crates/bevy_pbr/src/pbr_material.rs @@ -8,10 +8,7 @@ use bevy_render::{ mesh::MeshVertexBufferLayout, prelude::Shader, render_asset::{PrepareAssetError, RenderAsset, RenderAssets}, - render_resource::{ - std140::{AsStd140, Std140}, - *, - }, + render_resource::*, renderer::RenderDevice, texture::Image, }; @@ -62,6 +59,7 @@ pub struct StandardMaterial { pub cull_mode: Option, pub unlit: bool, pub alpha_mode: AlphaMode, + pub depth_bias: f32, } impl Default for StandardMaterial { @@ -92,6 +90,7 @@ impl Default for StandardMaterial { cull_mode: Some(Face::Back), unlit: false, alpha_mode: AlphaMode::Opaque, + depth_bias: 0.0, } } } @@ -119,7 +118,7 @@ impl From> for StandardMaterial { } } -// NOTE: These must match the bit flags in bevy_pbr/src/render/pbr.wgsl! +// NOTE: These must match the bit flags in bevy_pbr/src/render/pbr_types.wgsl! bitflags::bitflags! { #[repr(transparent)] pub struct StandardMaterialFlags: u32 { @@ -140,7 +139,7 @@ bitflags::bitflags! { } /// The GPU representation of the uniform data of a [`StandardMaterial`]. 
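The `depth_bias` hook added above feeds straight into `queue_material_meshes`, where it is added to the view-space depth (`mesh_z`) used as the phase sort key, so meshes at identical depth can be given a deterministic order instead of z-fighting. With the matching `depth_bias` field added to `StandardMaterial` in this patch, usage could look roughly like the sketch below; everything except `depth_bias` uses defaults and the colors are arbitrary.

```rust
use bevy_asset::Assets;
use bevy_ecs::prelude::ResMut;
use bevy_pbr::StandardMaterial;
use bevy_render::color::Color;

// Two co-planar materials; the non-zero bias nudges the second one's
// view-space depth so the draw order between them is deterministic.
fn setup_materials(mut materials: ResMut<Assets<StandardMaterial>>) {
    let _decal_under = materials.add(StandardMaterial {
        base_color: Color::RED,
        ..Default::default()
    });
    let _decal_over = materials.add(StandardMaterial {
        base_color: Color::BLUE,
        depth_bias: 1.0,
        ..Default::default()
    });
}
```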
-#[derive(Clone, Default, AsStd140)] +#[derive(Clone, Default, ShaderType)] pub struct StandardMaterialUniformData { /// Doubles as diffuse albedo for non-metallic, specular for metallic and a mix for everything /// in between. @@ -174,6 +173,7 @@ pub struct GpuStandardMaterial { pub flags: StandardMaterialFlags, pub base_color_texture: Option>, pub alpha_mode: AlphaMode, + pub depth_bias: f32, pub cull_mode: Option, } @@ -268,7 +268,7 @@ impl RenderAsset for StandardMaterial { | TextureFormat::Rg16Unorm | TextureFormat::Bc5RgUnorm | TextureFormat::EacRg11Unorm => { - flags |= StandardMaterialFlags::TWO_COMPONENT_NORMAL_MAP + flags |= StandardMaterialFlags::TWO_COMPONENT_NORMAL_MAP; } _ => {} } @@ -296,12 +296,15 @@ impl RenderAsset for StandardMaterial { flags: flags.bits(), alpha_cutoff, }; - let value_std140 = value.as_std140(); + + let byte_buffer = [0u8; StandardMaterialUniformData::SIZE.get() as usize]; + let mut buffer = encase::UniformBuffer::new(byte_buffer); + buffer.write(&value).unwrap(); let buffer = render_device.create_buffer_with_data(&BufferInitDescriptor { label: Some("pbr_standard_material_uniform_buffer"), usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST, - contents: value_std140.as_bytes(), + contents: buffer.as_ref(), }); let bind_group = render_device.create_bind_group(&BindGroupDescriptor { entries: &[ @@ -361,6 +364,7 @@ impl RenderAsset for StandardMaterial { has_normal_map, base_color_texture: material.base_color_texture, alpha_mode: material.alpha_mode, + depth_bias: material.depth_bias, cull_mode: material.cull_mode, }) } @@ -423,9 +427,7 @@ impl SpecializedMaterial for StandardMaterial { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: BufferSize::new( - StandardMaterialUniformData::std140_size_static() as u64, - ), + min_binding_size: Some(StandardMaterialUniformData::min_size()), }, count: None, }, @@ -528,4 +530,9 @@ impl SpecializedMaterial for StandardMaterial { fn alpha_mode(render_asset: &::PreparedAsset) -> AlphaMode { render_asset.alpha_mode } + + #[inline] + fn depth_bias(material: &::PreparedAsset) -> f32 { + material.depth_bias + } } diff --git a/crates/bevy_pbr/src/render/clustered_forward.wgsl b/crates/bevy_pbr/src/render/clustered_forward.wgsl new file mode 100644 index 0000000000000..a27e4b33b6e0b --- /dev/null +++ b/crates/bevy_pbr/src/render/clustered_forward.wgsl @@ -0,0 +1,100 @@ +#define_import_path bevy_pbr::clustered_forward + +// NOTE: Keep in sync with bevy_pbr/src/light.rs +fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { + var z_slice: u32 = 0u; + if (is_orthographic) { + // NOTE: view_z is correct in the orthographic case + z_slice = u32(floor((view_z - lights.cluster_factors.z) * lights.cluster_factors.w)); + } else { + // NOTE: had to use -view_z to make it positive else log(negative) is nan + z_slice = u32(log(-view_z) * lights.cluster_factors.z - lights.cluster_factors.w + 1.0); + } + // NOTE: We use min as we may limit the far z plane used for clustering to be closeer than + // the furthest thing being drawn. This means that we need to limit to the maximum cluster. 
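The `RenderAsset` impl for `StandardMaterial` above shows the wider std140/`AsStd140` to encase/`ShaderType` migration in this commit: the uniform value is written through `encase::UniformBuffer` into a plain byte buffer, and `min_binding_size` now comes from `min_size()` instead of `std140_size_static()`. A standalone sketch of that write path with a made-up uniform struct (assumes encase is built with glam support, as it is when re-exported by `bevy_render`):

```rust
use encase::{ShaderType, UniformBuffer};
use glam::Vec4;

// Illustrative uniform data, not the struct from the patch.
#[derive(ShaderType)]
struct ExampleUniformData {
    color: Vec4,
    intensity: f32,
}

fn main() {
    let value = ExampleUniformData {
        color: Vec4::ONE,
        intensity: 0.5,
    };

    // min_size() is also what the bind group layout's min_binding_size uses.
    let byte_buffer = vec![0u8; ExampleUniformData::min_size().get() as usize];
    let mut buffer = UniformBuffer::new(byte_buffer);
    buffer.write(&value).unwrap();

    // buffer.as_ref() is what gets handed to create_buffer_with_data as
    // `contents` in the patch.
    println!("wrote {} bytes", buffer.as_ref().len());
}
```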
+ return min(z_slice, lights.cluster_dimensions.z - 1u); +} + +fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { + let xy = vec2(floor(frag_coord * lights.cluster_factors.xy)); + let z_slice = view_z_to_z_slice(view_z, is_orthographic); + // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer + // arrays based on the cluster index. + return min( + (xy.y * lights.cluster_dimensions.x + xy.x) * lights.cluster_dimensions.z + z_slice, + lights.cluster_dimensions.w - 1u + ); +} + +// this must match CLUSTER_COUNT_SIZE in light.rs +let CLUSTER_COUNT_SIZE = 13u; +fn unpack_offset_and_count(cluster_index: u32) -> vec2 { +#ifdef NO_STORAGE_BUFFERS_SUPPORT + let offset_and_count = cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; + return vec2( + // The offset is stored in the upper 32 - CLUSTER_COUNT_SIZE = 19 bits + (offset_and_count >> CLUSTER_COUNT_SIZE) & ((1u << 32u - CLUSTER_COUNT_SIZE) - 1u), + // The count is stored in the lower CLUSTER_COUNT_SIZE = 13 bits + offset_and_count & ((1u << CLUSTER_COUNT_SIZE) - 1u) + ); +#else + return cluster_offsets_and_counts.data[cluster_index]; +#endif +} + +fn get_light_id(index: u32) -> u32 { +#ifdef NO_STORAGE_BUFFERS_SUPPORT + // The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32 + // This means the index into cluster_light_index_lists is index / 4 + let indices = cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)]; + // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index + return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); +#else + return cluster_light_index_lists.data[index]; +#endif +} + +fn cluster_debug_visualization( + output_color: vec4, + view_z: f32, + is_orthographic: bool, + offset_and_count: vec2, + cluster_index: u32, +) -> vec4 { + // Cluster allocation debug (using 'over' alpha blending) +#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES + // NOTE: This debug mode visualises the z-slices + let cluster_overlay_alpha = 0.1; + var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); + // A hack to make the colors alternate a bit more + if ((z_slice & 1u) == 1u) { + z_slice = z_slice + lights.cluster_dimensions.z / 2u; + } + let slice_color = hsv2rgb(f32(z_slice) / f32(lights.cluster_dimensions.z + 1u), 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY + // NOTE: This debug mode visualises the number of lights within the cluster that contains + // the fragment. It shows a sort of lighting complexity measure. 
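`unpack_offset_and_count` and `get_light_id` above document the uniform-buffer fallback layout: each cluster's offset lives in the upper 32 − 13 = 19 bits and its count in the lower `CLUSTER_COUNT_SIZE = 13` bits of a `u32`, and the light index list packs four 8-bit indices per `u32`. A plain-Rust mirror of that layout (illustrative only; the Rust-side `pack_offset_and_counts` in `light.rs` additionally splits the count into point and spot counts):

```rust
const CLUSTER_COUNT_SIZE: u32 = 13;

fn pack_offset_and_count(offset: u32, count: u32) -> u32 {
    (offset << CLUSTER_COUNT_SIZE) | (count & ((1u32 << CLUSTER_COUNT_SIZE) - 1))
}

fn unpack_offset_and_count(packed: u32) -> (u32, u32) {
    (
        // Offset: upper 32 - CLUSTER_COUNT_SIZE = 19 bits.
        (packed >> CLUSTER_COUNT_SIZE) & ((1u32 << (32 - CLUSTER_COUNT_SIZE)) - 1),
        // Count: lower CLUSTER_COUNT_SIZE = 13 bits.
        packed & ((1u32 << CLUSTER_COUNT_SIZE) - 1),
    )
}

fn get_light_id(packed_indices: &[u32], index: usize) -> u32 {
    // Four 8-bit light indices per u32: index / 4 picks the word,
    // index % 4 picks the byte, shifted out by 8 * sub-index.
    (packed_indices[index / 4] >> (8 * (index % 4))) & 0xFF
}

fn main() {
    let packed = pack_offset_and_count(1000, 7);
    assert_eq!(unpack_offset_and_count(packed), (1000, 7));

    let words = [0x0403_0201u32];
    assert_eq!(get_light_id(&words, 2), 3);
}
```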
+ let cluster_overlay_alpha = 0.1; + let max_light_complexity_per_cluster = 64.0; + output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r + + cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count[1])); + output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g + + cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count[1]))); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + // NOTE: Visualizes the cluster to which the fragment belongs + let cluster_overlay_alpha = 0.1; + let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + + return output_color; +} diff --git a/crates/bevy_pbr/src/render/depth.wgsl b/crates/bevy_pbr/src/render/depth.wgsl index 9f5e17fca3433..23c797d9dfa69 100644 --- a/crates/bevy_pbr/src/render/depth.wgsl +++ b/crates/bevy_pbr/src/render/depth.wgsl @@ -1,11 +1,6 @@ -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_types -// NOTE: Keep in sync with pbr.wgsl -struct View { - view_proj: mat4x4; - projection: mat4x4; - world_position: vec3; -}; [[group(0), binding(0)]] var view: View; @@ -18,6 +13,9 @@ var joint_matrices: SkinnedMesh; #import bevy_pbr::skinning #endif +// NOTE: Bindings must come before functions that use them! +#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; #ifdef SKINNED @@ -39,6 +37,6 @@ fn vertex(vertex: Vertex) -> VertexOutput { #endif var out: VertexOutput; - out.clip_position = view.view_proj * model * vec4(vertex.position, 1.0); + out.clip_position = mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); return out; } diff --git a/crates/bevy_pbr/src/render/light.rs b/crates/bevy_pbr/src/render/light.rs index 5fc61938c6922..edfee231b48dd 100644 --- a/crates/bevy_pbr/src/render/light.rs +++ b/crates/bevy_pbr/src/render/light.rs @@ -5,7 +5,7 @@ use crate::{ SHADOW_SHADER_HANDLE, }; use bevy_asset::Handle; -use bevy_core_pipeline::Transparent3d; +use bevy_core_pipeline::core_3d::Transparent3d; use bevy_ecs::{ prelude::*, system::{lifetimeless::*, SystemParamItem}, @@ -22,7 +22,7 @@ use bevy_render::{ EntityRenderCommand, PhaseItem, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, }, - render_resource::{std140::AsStd140, std430::AsStd430, *}, + render_resource::*, renderer::{RenderContext, RenderDevice, RenderQueue}, texture::*, view::{ @@ -35,7 +35,7 @@ use bevy_utils::{ tracing::{error, warn}, HashMap, }; -use std::num::NonZeroU32; +use std::num::{NonZeroU32, NonZeroU64}; #[derive(Debug, Hash, PartialEq, Eq, Clone, SystemLabel)] pub enum RenderLightSystems { @@ -46,11 +46,6 @@ pub enum RenderLightSystems { QueueShadows, } -pub struct ExtractedAmbientLight { - color: Color, - brightness: f32, -} - #[derive(Component)] pub struct ExtractedPointLight { color: Color, @@ -65,8 +60,6 @@ pub struct ExtractedPointLight { spotlight_angles: Option<(f32, f32)>, } -pub type ExtractedPointLightShadowMap = PointLightShadowMap; - #[derive(Component)] pub struct ExtractedDirectionalLight { color: Color, @@ -78,10 +71,7 @@ pub struct ExtractedDirectionalLight { shadow_normal_bias: f32, } -pub type ExtractedDirectionalLightShadowMap = DirectionalLightShadowMap; - -#[repr(C)] -#[derive(Copy, Clone, 
AsStd140, AsStd430, Default, Debug)] +#[derive(Copy, Clone, ShaderType, Default, Debug)] pub struct GpuPointLight { // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] // For spotlights: 2 components of the direction (x,z), spot_scale and spot_offset @@ -94,13 +84,28 @@ pub struct GpuPointLight { spotlight_tan_angle: f32, } +#[derive(ShaderType)] +pub struct GpuPointLightsUniform { + data: Box<[GpuPointLight; MAX_UNIFORM_BUFFER_POINT_LIGHTS]>, +} + +impl Default for GpuPointLightsUniform { + fn default() -> Self { + Self { + data: Box::new([GpuPointLight::default(); MAX_UNIFORM_BUFFER_POINT_LIGHTS]), + } + } +} + +#[derive(ShaderType, Default)] +pub struct GpuPointLightsStorage { + #[size(runtime)] + data: Vec, +} + pub enum GpuPointLights { - Uniform { - buffer: UniformVec<[GpuPointLight; MAX_UNIFORM_BUFFER_POINT_LIGHTS]>, - }, - Storage { - buffer: StorageBuffer, - }, + Uniform(UniformBuffer), + Storage(StorageBuffer), } impl GpuPointLights { @@ -112,66 +117,48 @@ impl GpuPointLights { } fn uniform() -> Self { - Self::Uniform { - buffer: UniformVec::default(), - } + Self::Uniform(UniformBuffer::default()) } fn storage() -> Self { - Self::Storage { - buffer: StorageBuffer::default(), - } - } - - fn clear(&mut self) { - match self { - GpuPointLights::Uniform { buffer } => buffer.clear(), - GpuPointLights::Storage { buffer } => buffer.clear(), - } + Self::Storage(StorageBuffer::default()) } - fn push(&mut self, mut lights: Vec) { + fn set(&mut self, mut lights: Vec) { match self { - GpuPointLights::Uniform { buffer } => { - // NOTE: This iterator construction allows moving and padding with default - // values and is like this to avoid unnecessary cloning. - let gpu_point_lights = lights - .drain(..) - .chain(std::iter::repeat_with(GpuPointLight::default)) - .take(MAX_UNIFORM_BUFFER_POINT_LIGHTS) - .collect::>(); - buffer.push(gpu_point_lights.try_into().unwrap()); + GpuPointLights::Uniform(buffer) => { + let len = lights.len().min(MAX_UNIFORM_BUFFER_POINT_LIGHTS); + let src = &lights[..len]; + let dst = &mut buffer.get_mut().data[..len]; + dst.copy_from_slice(src); } - GpuPointLights::Storage { buffer } => { - buffer.append(&mut lights); + GpuPointLights::Storage(buffer) => { + buffer.get_mut().data.clear(); + buffer.get_mut().data.append(&mut lights); } } } fn write_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { match self { - GpuPointLights::Uniform { buffer } => buffer.write_buffer(render_device, render_queue), - GpuPointLights::Storage { buffer } => buffer.write_buffer(render_device, render_queue), + GpuPointLights::Uniform(buffer) => buffer.write_buffer(render_device, render_queue), + GpuPointLights::Storage(buffer) => buffer.write_buffer(render_device, render_queue), } } pub fn binding(&self) -> Option { match self { - GpuPointLights::Uniform { buffer } => buffer.binding(), - GpuPointLights::Storage { buffer } => buffer.binding(), + GpuPointLights::Uniform(buffer) => buffer.binding(), + GpuPointLights::Storage(buffer) => buffer.binding(), } } - pub fn len(&self) -> usize { - match self { - GpuPointLights::Uniform { buffer } => buffer.len(), - GpuPointLights::Storage { buffer } => buffer.values().len(), + pub fn min_size(buffer_binding_type: BufferBindingType) -> NonZeroU64 { + match buffer_binding_type { + BufferBindingType::Storage { .. 
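`GpuPointLights` above keeps two representations of the same data: a fixed-length boxed array wrapped in a `UniformBuffer` for platforms without storage-buffer support, and an encase runtime-sized array (`#[size(runtime)]`) in a `StorageBuffer` otherwise, with `min_size()` supplying the right `min_binding_size` per binding type. A reduced sketch of just the two data layouts (illustrative element type and array length, assuming encase's glam support):

```rust
use encase::ShaderType;
use glam::Vec4;

const MAX_UNIFORM_ITEMS: usize = 256;

// Uniform fallback: the whole fixed-length array is part of the type,
// so its size is known up front.
#[derive(ShaderType)]
struct ExampleUniformList {
    data: [Vec4; MAX_UNIFORM_ITEMS],
}

// Storage variant: the trailing field is a runtime-sized array.
#[derive(ShaderType, Default)]
struct ExampleStorageList {
    #[size(runtime)]
    data: Vec<Vec4>,
}

fn main() {
    // These are the values fed into min_binding_size in the bind group
    // layouts, depending on which binding type the device supports.
    println!("uniform min size: {}", ExampleUniformList::min_size());
    println!("storage min size: {}", ExampleStorageList::min_size());
}
```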
} => GpuPointLightsStorage::min_size(), + BufferBindingType::Uniform => GpuPointLightsUniform::min_size(), } } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } } // NOTE: These must match the bit flags in bevy_pbr2/src/render/pbr.frag! @@ -186,8 +173,7 @@ bitflags::bitflags! { } } -#[repr(C)] -#[derive(Copy, Clone, AsStd140, Default, Debug)] +#[derive(Copy, Clone, ShaderType, Default, Debug)] pub struct GpuDirectionalLight { view_projection: Mat4, color: Vec4, @@ -207,10 +193,8 @@ bitflags::bitflags! { } } -#[repr(C)] -#[derive(Copy, Clone, Debug, AsStd140)] +#[derive(Copy, Clone, Debug, ShaderType)] pub struct GpuLights { - // TODO: this comes first to work around a WGSL alignment issue. We need to solve this issue before releasing the renderer rework directional_lights: [GpuDirectionalLight; MAX_DIRECTIONAL_LIGHTS], ambient_color: Vec4, // xyz are x/y/z cluster dimensions and w is the number of clusters @@ -252,7 +236,7 @@ impl FromWorld for ShadowPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(ViewUniform::std140_size_static() as u64), + min_binding_size: Some(ViewUniform::min_size()), }, count: None, }, @@ -422,7 +406,6 @@ pub fn extract_clusters(mut commands: Commands, views: Query<(Entity, &Clusters) #[allow(clippy::too_many_arguments)] pub fn extract_lights( mut commands: Commands, - ambient_light: Res, point_light_shadow_map: Res, directional_light_shadow_map: Res, global_point_lights: Res, @@ -446,14 +429,14 @@ pub fn extract_lights( mut previous_point_lights_len: Local, mut previous_spotlights_len: Local, ) { - commands.insert_resource(ExtractedAmbientLight { - color: ambient_light.color, - brightness: ambient_light.brightness, - }); - commands.insert_resource::(point_light_shadow_map.clone()); - commands.insert_resource::( - directional_light_shadow_map.clone(), - ); + // NOTE: These shadow map resources are extracted here as they are used here too so this avoids + // races between scheduling of ExtractResourceSystems and this system. + if point_light_shadow_map.is_changed() { + commands.insert_resource(point_light_shadow_map.clone()); + } + if directional_light_shadow_map.is_changed() { + commands.insert_resource(directional_light_shadow_map.clone()); + } // This is the point light shadow map texel size for one face of the cube as a distance of 1.0 // world unit from the light. 
// point_light_texel_size = 2.0 * 1.0 * tan(PI / 4.0) / cube face width in texels @@ -681,7 +664,7 @@ impl GlobalLightMeta { #[derive(Default)] pub struct LightMeta { - pub view_gpu_lights: DynamicUniformVec, + pub view_gpu_lights: DynamicUniformBuffer, pub shadow_view_bind_group: Option, } @@ -759,9 +742,9 @@ pub fn prepare_lights( (Entity, &ExtractedView, &ExtractedClusterConfig), With>, >, - ambient_light: Res, - point_light_shadow_map: Res, - directional_light_shadow_map: Res, + ambient_light: Res, + point_light_shadow_map: Res, + directional_light_shadow_map: Res, point_lights: Query<(Entity, &ExtractedPointLight)>, directional_lights: Query<(Entity, &ExtractedDirectionalLight)>, ) { @@ -775,7 +758,6 @@ pub fn prepare_lights( .map(|CubeMapFace { target, up }| GlobalTransform::identity().looking_at(*target, *up)) .collect::>(); - global_light_meta.gpu_point_lights.clear(); global_light_meta.entity_to_index.clear(); let mut point_lights: Vec<_> = point_lights.iter().collect::>(); @@ -896,7 +878,8 @@ pub fn prepare_lights( }); global_light_meta.entity_to_index.insert(entity, index); } - global_light_meta.gpu_point_lights.push(gpu_point_lights); + + global_light_meta.gpu_point_lights.set(gpu_point_lights); global_light_meta .gpu_point_lights .write_buffer(&render_device, &render_queue); @@ -1235,16 +1218,58 @@ fn pack_offset_and_counts(offset: usize, point_count: usize, spot_count: usize) | (spot_count as u32 & CLUSTER_COUNT_MASK) } +#[derive(ShaderType)] +struct GpuClusterLightIndexListsUniform { + data: Box<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>, +} + +// NOTE: Assert at compile time that GpuClusterLightIndexListsUniform +// fits within the maximum uniform buffer binding size +const _: () = assert!(GpuClusterLightIndexListsUniform::SIZE.get() <= 16384); + +impl Default for GpuClusterLightIndexListsUniform { + fn default() -> Self { + Self { + data: Box::new([UVec4::ZERO; ViewClusterBindings::MAX_UNIFORM_ITEMS]), + } + } +} + +#[derive(ShaderType)] +struct GpuClusterOffsetsAndCountsUniform { + data: Box<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>, +} + +impl Default for GpuClusterOffsetsAndCountsUniform { + fn default() -> Self { + Self { + data: Box::new([UVec4::ZERO; ViewClusterBindings::MAX_UNIFORM_ITEMS]), + } + } +} + +#[derive(ShaderType, Default)] +struct GpuClusterLightIndexListsStorage { + #[size(runtime)] + data: Vec, +} + +#[derive(ShaderType, Default)] +struct GpuClusterOffsetsAndCountsStorage { + #[size(runtime)] + data: Vec, +} + enum ViewClusterBuffers { Uniform { // NOTE: UVec4 is because all arrays in Std140 layout have 16-byte alignment - cluster_light_index_lists: UniformVec<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>, + cluster_light_index_lists: UniformBuffer, // NOTE: UVec4 is because all arrays in Std140 layout have 16-byte alignment - cluster_offsets_and_counts: UniformVec<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>, + cluster_offsets_and_counts: UniformBuffer, }, Storage { - cluster_light_index_lists: StorageBuffer, - cluster_offsets_and_counts: StorageBuffer, + cluster_light_index_lists: StorageBuffer, + cluster_offsets_and_counts: StorageBuffer, }, } @@ -1258,8 +1283,8 @@ impl ViewClusterBuffers { fn uniform() -> Self { ViewClusterBuffers::Uniform { - cluster_light_index_lists: UniformVec::default(), - cluster_offsets_and_counts: UniformVec::default(), + cluster_light_index_lists: UniformBuffer::default(), + cluster_offsets_and_counts: UniformBuffer::default(), } } @@ -1291,24 +1316,22 @@ impl ViewClusterBindings { } } - pub fn 
reserve_and_clear(&mut self) { + pub fn clear(&mut self) { match &mut self.buffers { ViewClusterBuffers::Uniform { cluster_light_index_lists, cluster_offsets_and_counts, } => { - cluster_light_index_lists.clear(); - cluster_light_index_lists.push([UVec4::ZERO; Self::MAX_UNIFORM_ITEMS]); - cluster_offsets_and_counts.clear(); - cluster_offsets_and_counts.push([UVec4::ZERO; Self::MAX_UNIFORM_ITEMS]); + *cluster_light_index_lists.get_mut().data = [UVec4::ZERO; Self::MAX_UNIFORM_ITEMS]; + *cluster_offsets_and_counts.get_mut().data = [UVec4::ZERO; Self::MAX_UNIFORM_ITEMS]; } ViewClusterBuffers::Storage { cluster_light_index_lists, cluster_offsets_and_counts, .. } => { - cluster_light_index_lists.clear(); - cluster_offsets_and_counts.clear(); + cluster_light_index_lists.get_mut().data.clear(); + cluster_offsets_and_counts.get_mut().data.clear(); } } } @@ -1327,18 +1350,16 @@ impl ViewClusterBindings { let component = self.n_offsets & ((1 << 2) - 1); let packed = pack_offset_and_counts(offset, point_count, spot_count); - cluster_offsets_and_counts.get_mut(0)[array_index][component] = packed; + cluster_offsets_and_counts.get_mut().data[array_index][component] = packed; } ViewClusterBuffers::Storage { cluster_offsets_and_counts, .. } => { - cluster_offsets_and_counts.push(UVec4::new( - offset as u32, - point_count as u32, - spot_count as u32, - 0, - )); + cluster_offsets_and_counts + .get_mut() + .data + .push(UVec2::new(offset as u32, count as u32)); } } @@ -1360,14 +1381,14 @@ impl ViewClusterBindings { let sub_index = self.n_indices & ((1 << 2) - 1); let index = index as u32; - cluster_light_index_lists.get_mut(0)[array_index][component] |= + cluster_light_index_lists.get_mut().data[array_index][component] |= index << (8 * sub_index); } ViewClusterBuffers::Storage { cluster_light_index_lists, .. } => { - cluster_light_index_lists.push(index as u32); + cluster_light_index_lists.get_mut().data.push(index as u32); } } @@ -1418,6 +1439,24 @@ impl ViewClusterBindings { } => cluster_offsets_and_counts.binding(), } } + + pub fn min_size_cluster_light_index_lists( + buffer_binding_type: BufferBindingType, + ) -> NonZeroU64 { + match buffer_binding_type { + BufferBindingType::Storage { .. } => GpuClusterLightIndexListsStorage::min_size(), + BufferBindingType::Uniform => GpuClusterLightIndexListsUniform::min_size(), + } + } + + pub fn min_size_cluster_offsets_and_counts( + buffer_binding_type: BufferBindingType, + ) -> NonZeroU64 { + match buffer_binding_type { + BufferBindingType::Storage { .. 
} => GpuClusterOffsetsAndCountsStorage::min_size(), + BufferBindingType::Uniform => GpuClusterOffsetsAndCountsUniform::min_size(), + } + } } pub fn prepare_clusters( @@ -1443,7 +1482,7 @@ pub fn prepare_clusters( for (entity, cluster_config, extracted_clusters) in views.iter() { let mut view_clusters_bindings = ViewClusterBindings::new(mesh_pipeline.clustered_forward_buffer_binding_type); - view_clusters_bindings.reserve_and_clear(); + view_clusters_bindings.clear(); let mut indices_full = false; diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index f90d3894bbf69..950d32d6fba18 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -1,27 +1,29 @@ use crate::{ - GlobalLightMeta, GpuLights, LightMeta, NotShadowCaster, NotShadowReceiver, ShadowPipeline, - ViewClusterBindings, ViewLightsUniformOffset, ViewShadowBindings, + GlobalLightMeta, GpuLights, GpuPointLights, LightMeta, NotShadowCaster, NotShadowReceiver, + ShadowPipeline, ViewClusterBindings, ViewLightsUniformOffset, ViewShadowBindings, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, }; use bevy_app::Plugin; use bevy_asset::{load_internal_asset, Assets, Handle, HandleUntyped}; use bevy_ecs::{ prelude::*, - system::{lifetimeless::*, SystemParamItem}, + system::{lifetimeless::*, SystemParamItem, SystemState}, }; use bevy_math::{Mat4, Vec2}; use bevy_reflect::TypeUuid; use bevy_render::{ + extract_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, mesh::{ skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, GpuBufferInfo, Mesh, MeshVertexBufferLayout, }, render_asset::RenderAssets, - render_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, render_phase::{EntityRenderCommand, RenderCommandResult, TrackedRenderPass}, - render_resource::{std140::AsStd140, *}, + render_resource::*, renderer::{RenderDevice, RenderQueue}, - texture::{BevyDefault, GpuImage, Image, TextureFormatPixelInfo}, + texture::{ + BevyDefault, DefaultImageSampler, GpuImage, Image, ImageSampler, TextureFormatPixelInfo, + }, view::{ComputedVisibility, ViewUniform, ViewUniformOffset, ViewUniforms}, RenderApp, RenderStage, }; @@ -35,10 +37,16 @@ const MAX_JOINTS: usize = 256; const JOINT_SIZE: usize = std::mem::size_of::(); pub(crate) const JOINT_BUFFER_SIZE: usize = MAX_JOINTS * JOINT_SIZE; -pub const MESH_VIEW_BIND_GROUP_HANDLE: HandleUntyped = +pub const MESH_VIEW_TYPES_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 8140454348013264787); +pub const MESH_VIEW_BINDINGS_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 9076678235888822571); -pub const MESH_STRUCT_HANDLE: HandleUntyped = +pub const MESH_TYPES_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 2506024101911992377); +pub const MESH_BINDINGS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 16831548636314682308); +pub const MESH_FUNCTIONS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 6300874327833745635); pub const MESH_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 3252377289100772450); pub const SKINNING_HANDLE: HandleUntyped = @@ -46,19 +54,32 @@ pub const SKINNING_HANDLE: HandleUntyped = impl Plugin for MeshRenderPlugin { fn build(&self, app: &mut bevy_app::App) { - load_internal_asset!(app, MESH_SHADER_HANDLE, "mesh.wgsl", Shader::from_wgsl); load_internal_asset!( app, - MESH_STRUCT_HANDLE, - "mesh_struct.wgsl", + MESH_VIEW_TYPES_HANDLE, + 
"mesh_view_types.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + MESH_VIEW_BINDINGS_HANDLE, + "mesh_view_bindings.wgsl", Shader::from_wgsl ); + load_internal_asset!(app, MESH_TYPES_HANDLE, "mesh_types.wgsl", Shader::from_wgsl); load_internal_asset!( app, - MESH_VIEW_BIND_GROUP_HANDLE, - "mesh_view_bind_group.wgsl", + MESH_BINDINGS_HANDLE, + "mesh_bindings.wgsl", Shader::from_wgsl ); + load_internal_asset!( + app, + MESH_FUNCTIONS_HANDLE, + "mesh_functions.wgsl", + Shader::from_wgsl + ); + load_internal_asset!(app, MESH_SHADER_HANDLE, "mesh.wgsl", Shader::from_wgsl); load_internal_asset!(app, SKINNING_HANDLE, "skinning.wgsl", Shader::from_wgsl); app.add_plugin(UniformComponentPlugin::::default()); @@ -76,7 +97,7 @@ impl Plugin for MeshRenderPlugin { } } -#[derive(Component, AsStd140, Clone)] +#[derive(Component, ShaderType, Clone)] pub struct MeshUniform { pub transform: Mat4, pub inverse_transpose_model: Mat4, @@ -264,13 +285,15 @@ pub struct MeshPipeline { impl FromWorld for MeshPipeline { fn from_world(world: &mut World) -> Self { - let render_device = world.resource::(); + let mut system_state: SystemState<( + Res, + Res, + Res, + )> = SystemState::new(world); + let (render_device, default_sampler, render_queue) = system_state.get_mut(world); let clustered_forward_buffer_binding_type = render_device .get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT); - let cluster_min_binding_size = match clustered_forward_buffer_binding_type { - BufferBindingType::Storage { .. } => None, - BufferBindingType::Uniform => BufferSize::new(16384), - }; + let view_layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { entries: &[ // View @@ -280,7 +303,7 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(ViewUniform::std140_size_static() as u64), + min_binding_size: Some(ViewUniform::min_size()), }, count: None, }, @@ -291,7 +314,7 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(GpuLights::std140_size_static() as u64), + min_binding_size: Some(GpuLights::min_size()), }, count: None, }, @@ -344,10 +367,9 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: clustered_forward_buffer_binding_type, has_dynamic_offset: false, - // NOTE (when no storage buffers): Static size for uniform buffers. - // GpuPointLight has a padded size of 64 bytes, so 16384 / 64 = 256 - // point lights max - min_binding_size: cluster_min_binding_size, + min_binding_size: Some(GpuPointLights::min_size( + clustered_forward_buffer_binding_type, + )), }, count: None, }, @@ -358,9 +380,11 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: clustered_forward_buffer_binding_type, has_dynamic_offset: false, - // NOTE (when no storage buffers): With 256 point lights max, indices - // need 8 bits so use u8 - min_binding_size: cluster_min_binding_size, + min_binding_size: Some( + ViewClusterBindings::min_size_cluster_light_index_lists( + clustered_forward_buffer_binding_type, + ), + ), }, count: None, }, @@ -371,12 +395,11 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: clustered_forward_buffer_binding_type, has_dynamic_offset: false, - // NOTE (when no storage buffers): The offset needs to address 16384 - // indices, which needs 14 bits. The count can be at most all 256 lights - // so 8 bits. 
- // NOTE: Pack the offset into the upper 19 bits and the count into the - // lower 13 bits. - min_binding_size: cluster_min_binding_size, + min_binding_size: Some( + ViewClusterBindings::min_size_cluster_offsets_and_counts( + clustered_forward_buffer_binding_type, + ), + ), }, count: None, }, @@ -390,7 +413,7 @@ impl FromWorld for MeshPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(MeshUniform::std140_size_static() as u64), + min_binding_size: Some(MeshUniform::min_size()), }, count: None, }; @@ -427,10 +450,12 @@ impl FromWorld for MeshPipeline { TextureFormat::bevy_default(), ); let texture = render_device.create_texture(&image.texture_descriptor); - let sampler = render_device.create_sampler(&image.sampler_descriptor); + let sampler = match image.sampler_descriptor { + ImageSampler::Default => (**default_sampler).clone(), + ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + }; let format_size = image.texture_descriptor.format.pixel_size(); - let render_queue = world.resource_mut::(); render_queue.write_texture( ImageCopyTexture { texture: &texture, @@ -565,18 +590,6 @@ impl SpecializedMeshPipeline for MeshPipeline { vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(4)); } - // TODO: consider exposing this in shaders in a more generally useful way, such as: - // # if AVAILABLE_STORAGE_BUFFER_BINDINGS == 3 - // /* use storage buffers here */ - // # elif - // /* use uniforms here */ - if !matches!( - self.clustered_forward_buffer_binding_type, - BufferBindingType::Storage { .. } - ) { - shader_defs.push(String::from("NO_STORAGE_BUFFERS_SUPPORT")); - } - let mut bind_group_layout = vec![self.view_layout.clone()]; if layout.contains(Mesh::ATTRIBUTE_JOINT_INDEX) && layout.contains(Mesh::ATTRIBUTE_JOINT_WEIGHT) @@ -607,9 +620,6 @@ impl SpecializedMeshPipeline for MeshPipeline { depth_write_enabled = true; } - #[cfg(feature = "webgl")] - shader_defs.push(String::from("NO_ARRAY_TEXTURES_SUPPORT")); - Ok(RenderPipelineDescriptor { vertex: VertexState { shader: MESH_SHADER_HANDLE.typed::(), @@ -688,7 +698,7 @@ pub fn queue_mesh_bind_group( skinned: None, }; - if let Some(skinned_joints_buffer) = skinned_mesh_uniform.buffer.uniform_buffer() { + if let Some(skinned_joints_buffer) = skinned_mesh_uniform.buffer.buffer() { mesh_bind_group.skinned = Some(render_device.create_bind_group(&BindGroupDescriptor { entries: &[ BindGroupEntry { @@ -712,9 +722,22 @@ pub fn queue_mesh_bind_group( } } -#[derive(Default)] +// NOTE: This is using BufferVec because it is using a trick to allow a fixed-size array +// in a uniform buffer to be used like a variable-sized array by only writing the valid data +// into the buffer, knowing the number of valid items starting from the dynamic offset, and +// ignoring the rest, whether they're valid for other dynamic offsets or not. This trick may +// be supported later in encase, and then we should make use of it. 
+ pub struct SkinnedMeshUniform { - pub buffer: UniformVec, + pub buffer: BufferVec, +} + +impl Default for SkinnedMeshUniform { + fn default() -> Self { + Self { + buffer: BufferVec::new(BufferUsages::UNIFORM), + } + } } pub fn prepare_skinned_meshes( @@ -731,7 +754,7 @@ pub fn prepare_skinned_meshes( skinned_mesh_uniform .buffer .reserve(extracted_joints.buffer.len(), &render_device); - for joint in extracted_joints.buffer.iter() { + for joint in &extracted_joints.buffer { skinned_mesh_uniform.buffer.push(*joint); } skinned_mesh_uniform diff --git a/crates/bevy_pbr/src/render/mesh.wgsl b/crates/bevy_pbr/src/render/mesh.wgsl index 2adaab7c086a7..82ad909e5ce21 100644 --- a/crates/bevy_pbr/src/render/mesh.wgsl +++ b/crates/bevy_pbr/src/render/mesh.wgsl @@ -1,5 +1,8 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_bindings + +// NOTE: Bindings must come before functions that use them! +#import bevy_pbr::mesh_functions struct Vertex { [[location(0)]] position: vec3; @@ -30,48 +33,26 @@ struct VertexOutput { #endif }; -[[group(2), binding(0)]] -var mesh: Mesh; -#ifdef SKINNED -[[group(2), binding(1)]] -var joint_matrices: SkinnedMesh; -#import bevy_pbr::skinning -#endif - [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; #ifdef SKINNED var model = skin_model(vertex.joint_indices, vertex.joint_weights); - out.world_position = model * vec4(vertex.position, 1.0); out.world_normal = skin_normals(model, vertex.normal); -#ifdef VERTEX_TANGENTS - out.world_tangent = skin_tangents(model, vertex.tangent); -#endif #else - out.world_position = mesh.model * vec4(vertex.position, 1.0); - out.world_normal = mat3x3( - mesh.inverse_transpose_model[0].xyz, - mesh.inverse_transpose_model[1].xyz, - mesh.inverse_transpose_model[2].xyz - ) * vertex.normal; -#ifdef VERTEX_TANGENTS - out.world_tangent = vec4( - mat3x3( - mesh.model[0].xyz, - mesh.model[1].xyz, - mesh.model[2].xyz - ) * vertex.tangent.xyz, - vertex.tangent.w - ); + var model = mesh.model; #endif + out.world_position = mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); + out.world_normal = mesh_normal_local_to_world(vertex.normal); + out.uv = vertex.uv; +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_tangent_local_to_world(model, vertex.tangent); #endif #ifdef VERTEX_COLORS out.color = vertex.color; -#endif +#endif - out.uv = vertex.uv; - out.clip_position = view.view_proj * out.world_position; + out.clip_position = mesh_position_world_to_clip(out.world_position); return out; } diff --git a/crates/bevy_pbr/src/render/mesh_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_bindings.wgsl new file mode 100644 index 0000000000000..8f0c69a781832 --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_bindings.wgsl @@ -0,0 +1,11 @@ +#define_import_path bevy_pbr::mesh_bindings + +#import bevy_pbr::mesh_types + +[[group(2), binding(0)]] +var mesh: Mesh; +#ifdef SKINNED +[[group(2), binding(1)]] +var joint_matrices: SkinnedMesh; +#import bevy_pbr::skinning +#endif diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl new file mode 100644 index 0000000000000..20c763bd22d79 --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -0,0 +1,36 @@ +#define_import_path bevy_pbr::mesh_functions + +fn mesh_position_local_to_world(model: mat4x4, vertex_position: vec4) -> vec4 { + return model * vertex_position; +} + +fn mesh_position_world_to_clip(world_position: vec4) -> 
vec4 { + return view.view_proj * world_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. +fn mesh_position_local_to_clip(model: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh_position_local_to_world(model, vertex_position); + return mesh_position_world_to_clip(world_position); +} + +fn mesh_normal_local_to_world(vertex_normal: vec3) -> vec3 { + return mat3x3( + mesh.inverse_transpose_model[0].xyz, + mesh.inverse_transpose_model[1].xyz, + mesh.inverse_transpose_model[2].xyz + ) * vertex_normal; +} + +fn mesh_tangent_local_to_world(model: mat4x4, vertex_tangent: vec4) -> vec4 { + return vec4( + mat3x3( + model[0].xyz, + model[1].xyz, + model[2].xyz + ) * vertex_tangent.xyz, + vertex_tangent.w + ); +} diff --git a/crates/bevy_pbr/src/render/mesh_struct.wgsl b/crates/bevy_pbr/src/render/mesh_types.wgsl similarity index 88% rename from crates/bevy_pbr/src/render/mesh_struct.wgsl rename to crates/bevy_pbr/src/render/mesh_types.wgsl index de29921f46f7b..c04720d551e42 100644 --- a/crates/bevy_pbr/src/render/mesh_struct.wgsl +++ b/crates/bevy_pbr/src/render/mesh_types.wgsl @@ -1,4 +1,4 @@ -#define_import_path bevy_pbr::mesh_struct +#define_import_path bevy_pbr::mesh_types struct Mesh { model: mat4x4; diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl new file mode 100644 index 0000000000000..ec6f5dbb4398c --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -0,0 +1,42 @@ +#define_import_path bevy_pbr::mesh_view_bindings + +#import bevy_pbr::mesh_view_types + +[[group(0), binding(0)]] +var view: View; +[[group(0), binding(1)]] +var lights: Lights; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +[[group(0), binding(2)]] +var point_shadow_textures: texture_depth_cube; +#else +[[group(0), binding(2)]] +var point_shadow_textures: texture_depth_cube_array; +#endif +[[group(0), binding(3)]] +var point_shadow_textures_sampler: sampler_comparison; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +[[group(0), binding(4)]] +var directional_shadow_textures: texture_depth_2d; +#else +[[group(0), binding(4)]] +var directional_shadow_textures: texture_depth_2d_array; +#endif +[[group(0), binding(5)]] +var directional_shadow_textures_sampler: sampler_comparison; + +#ifdef NO_STORAGE_BUFFERS_SUPPORT +[[group(0), binding(6)]] +var point_lights: PointLights; +[[group(0), binding(7)]] +var cluster_light_index_lists: ClusterLightIndexLists; +[[group(0), binding(8)]] +var cluster_offsets_and_counts: ClusterOffsetsAndCounts; +#else +[[group(0), binding(6)]] +var point_lights: PointLights; +[[group(0), binding(7)]] +var cluster_light_index_lists: ClusterLightIndexLists; +[[group(0), binding(8)]] +var cluster_offsets_and_counts: ClusterOffsetsAndCounts; +#endif diff --git a/crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl b/crates/bevy_pbr/src/render/mesh_view_types.wgsl similarity index 67% rename from crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl rename to crates/bevy_pbr/src/render/mesh_view_types.wgsl index b1239fc4a171b..463b88933ad99 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_types.wgsl @@ -1,4 +1,4 @@ -#define_import_path bevy_pbr::mesh_view_bind_group +#define_import_path bevy_pbr::mesh_view_types struct View { view_proj: mat4x4; @@ -84,42 +84,3 @@ struct ClusterOffsetsAndCounts { data: array>; }; #endif - -[[group(0), binding(0)]] -var view: View; 
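The new `bevy_pbr::mesh_functions` module above centralizes the local-to-world-to-clip transforms and the inverse-transpose normal transform that `mesh.wgsl` and `depth.wgsl` now import. For reference, the same math written as plain Rust over `bevy_math`/glam types (a CPU-side illustration, not code from the patch):

```rust
use bevy_math::{Mat3, Mat4, Vec3, Vec4};

fn mesh_position_local_to_world(model: Mat4, vertex_position: Vec4) -> Vec4 {
    model * vertex_position
}

fn mesh_normal_local_to_world(inverse_transpose_model: Mat4, vertex_normal: Vec3) -> Vec3 {
    // Normals use the upper-left 3x3 of the inverse transpose of the model
    // matrix so that non-uniform scale doesn't skew them.
    Mat3::from_mat4(inverse_transpose_model) * vertex_normal
}

fn main() {
    let model = Mat4::from_scale(Vec3::new(2.0, 1.0, 1.0));
    let inverse_transpose_model = model.inverse().transpose();

    let world_position = mesh_position_local_to_world(model, Vec4::new(1.0, 0.0, 0.0, 1.0));
    let world_normal = mesh_normal_local_to_world(inverse_transpose_model, Vec3::Y).normalize();
    println!("position: {world_position}, normal: {world_normal}");
}
```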
-[[group(0), binding(1)]] -var lights: Lights; -#ifdef NO_ARRAY_TEXTURES_SUPPORT -[[group(0), binding(2)]] -var point_shadow_textures: texture_depth_cube; -#else -[[group(0), binding(2)]] -var point_shadow_textures: texture_depth_cube_array; -#endif -[[group(0), binding(3)]] -var point_shadow_textures_sampler: sampler_comparison; -#ifdef NO_ARRAY_TEXTURES_SUPPORT -[[group(0), binding(4)]] -var directional_shadow_textures: texture_depth_2d; -#else -[[group(0), binding(4)]] -var directional_shadow_textures: texture_depth_2d_array; -#endif -[[group(0), binding(5)]] -var directional_shadow_textures_sampler: sampler_comparison; - -#ifdef NO_STORAGE_BUFFERS_SUPPORT -[[group(0), binding(6)]] -var point_lights: PointLights; -[[group(0), binding(7)]] -var cluster_light_index_lists: ClusterLightIndexLists; -[[group(0), binding(8)]] -var cluster_offsets_and_counts: ClusterOffsetsAndCounts; -#else -[[group(0), binding(6)]] -var point_lights: PointLights; -[[group(0), binding(7)]] -var cluster_light_index_lists: ClusterLightIndexLists; -[[group(0), binding(8)]] -var cluster_offsets_and_counts: ClusterOffsetsAndCounts; -#endif diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index 1012bef1bc8c3..3f169d5104f66 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -1,544 +1,12 @@ -// From the Filament design doc -// https://google.github.io/filament/Filament.html#table_symbols -// Symbol Definition -// v View unit vector -// l Incident light unit vector -// n Surface normal unit vector -// h Half unit vector between l and v -// f BRDF -// f_d Diffuse component of a BRDF -// f_r Specular component of a BRDF -// α Roughness, remapped from using input perceptualRoughness -// σ Diffuse reflectance -// Ω Spherical domain -// f0 Reflectance at normal incidence -// f90 Reflectance at grazing angle -// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) -// nior Index of refraction (IOR) of an interface -// ⟨n⋅l⟩ Dot product clamped to [0..1] -// ⟨a⟩ Saturated value (clamped to [0..1]) +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::pbr_bindings +#import bevy_pbr::mesh_bindings -// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material -// and consists of two components, the diffuse component (f_d) and the specular component (f_r): -// f(v,l) = f_d(v,l) + f_r(v,l) -// -// The form of the microfacet model is the same for diffuse and specular -// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm -// -// In which: -// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets -// G models the visibility (or occlusion or shadow-masking) of the microfacets -// f_m is the microfacet BRDF and differs between specular and diffuse components -// -// The above integration needs to be approximated. - -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct - -[[group(2), binding(0)]] -var mesh: Mesh; - -struct StandardMaterial { - base_color: vec4; - emissive: vec4; - perceptual_roughness: f32; - metallic: f32; - reflectance: f32; - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
- flags: u32; - alpha_cutoff: f32; -}; - -let STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; -let STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; -let STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; -let STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; -let STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; -let STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u; -let STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u; -let STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u; - -[[group(1), binding(0)]] -var material: StandardMaterial; -[[group(1), binding(1)]] -var base_color_texture: texture_2d; -[[group(1), binding(2)]] -var base_color_sampler: sampler; -[[group(1), binding(3)]] -var emissive_texture: texture_2d; -[[group(1), binding(4)]] -var emissive_sampler: sampler; -[[group(1), binding(5)]] -var metallic_roughness_texture: texture_2d; -[[group(1), binding(6)]] -var metallic_roughness_sampler: sampler; -[[group(1), binding(7)]] -var occlusion_texture: texture_2d; -[[group(1), binding(8)]] -var occlusion_sampler: sampler; -[[group(1), binding(9)]] -var normal_map_texture: texture_2d; -[[group(1), binding(10)]] -var normal_map_sampler: sampler; - -let PI: f32 = 3.141592653589793; - -fn saturate(value: f32) -> f32 { - return clamp(value, 0.0, 1.0); -} - -// distanceAttenuation is simply the square falloff of light intensity -// combined with a smooth attenuation at the edge of the light radius -// -// light radius is a non-physical construct for efficiency purposes, -// because otherwise every light affects every fragment in the scene -fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { - let factor = distanceSquare * inverseRangeSquared; - let smoothFactor = saturate(1.0 - factor * factor); - let attenuation = smoothFactor * smoothFactor; - return attenuation * 1.0 / max(distanceSquare, 0.0001); -} - -// Normal distribution function (specular D) -// Based on https://google.github.io/filament/Filament.html#citation-walter07 - -// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } - -// Simple implementation, has precision problems when using fp16 instead of fp32 -// see https://google.github.io/filament/Filament.html#listing_speculardfp16 -fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { - let oneMinusNoHSquared = 1.0 - NoH * NoH; - let a = NoH * roughness; - let k = roughness / (oneMinusNoHSquared + a * a); - let d = k * k * (1.0 / PI); - return d; -} - -// Visibility function (Specular G) -// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } -// such that f_r becomes -// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) -// where -// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } -// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv -fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { - let a2 = roughness * roughness; - let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); - let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); - let v = 0.5 / (lambdaV + lambdaL); - return v; -} - -// Fresnel function -// see https://google.github.io/filament/Filament.html#citation-schlick94 -// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 -fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { - // not using mix to 
keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); -} - -fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { - // not using mix to keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); -} - -fn fresnel(f0: vec3, LoH: f32) -> vec3 { - // f_90 suitable for ambient occlusion - // see https://google.github.io/filament/Filament.html#lighting/occlusion - let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); - return F_Schlick_vec(f0, f90, LoH); -} - -// Specular BRDF -// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf - -// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m -// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } -fn specular(f0: vec3, roughness: f32, h: vec3, NoV: f32, NoL: f32, - NoH: f32, LoH: f32, specularIntensity: f32) -> vec3 { - let D = D_GGX(roughness, NoH, h); - let V = V_SmithGGXCorrelated(roughness, NoV, NoL); - let F = fresnel(f0, LoH); - - return (specularIntensity * D * V) * F; -} - -// Diffuse BRDF -// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf -// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm -// -// simplest approximation -// float Fd_Lambert() { -// return 1.0 / PI; -// } -// -// vec3 Fd = diffuseColor * Fd_Lambert(); -// -// Disney approximation -// See https://google.github.io/filament/Filament.html#citation-burley12 -// minimal quality difference -fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { - let f90 = 0.5 + 2.0 * roughness * LoH * LoH; - let lightScatter = F_Schlick(1.0, f90, NoL); - let viewScatter = F_Schlick(1.0, f90, NoV); - return lightScatter * viewScatter * (1.0 / PI); -} - -// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile -fn EnvBRDFApprox(f0: vec3, perceptual_roughness: f32, NoV: f32) -> vec3 { - let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); - let c1 = vec4(1.0, 0.0425, 1.04, -0.04); - let r = perceptual_roughness * c0 + c1; - let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; - let AB = vec2(-1.04, 1.04) * a004 + r.zw; - return f0 * AB.x + AB.y; -} - -fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { - // clamp perceptual roughness to prevent precision problems - // According to Filament design 0.089 is recommended for mobile - // Filament uses 0.045 for non-mobile - let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); - return clampedPerceptualRoughness * clampedPerceptualRoughness; -} - -// from https://64.github.io/tonemapping/ -// reinhard on RGB oversaturates colors -fn reinhard(color: vec3) -> vec3 { - return color / (1.0 + color); -} - -fn reinhard_extended(color: vec3, max_white: f32) -> vec3 { - let numerator = color * (1.0 + (color / vec3(max_white * max_white))); - return numerator / (1.0 + color); -} - -// luminance coefficients from Rec. 709. 
-// https://en.wikipedia.org/wiki/Rec._709 -fn luminance(v: vec3) -> f32 { - return dot(v, vec3(0.2126, 0.7152, 0.0722)); -} - -fn change_luminance(c_in: vec3, l_out: f32) -> vec3 { - let l_in = luminance(c_in); - return c_in * (l_out / l_in); -} - -fn reinhard_luminance(color: vec3) -> vec3 { - let l_old = luminance(color); - let l_new = l_old / (1.0 + l_old); - return change_luminance(color, l_new); -} - -fn reinhard_extended_luminance(color: vec3, max_white_l: f32) -> vec3 { - let l_old = luminance(color); - let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l))); - let l_new = numerator / (1.0 + l_old); - return change_luminance(color, l_new); -} - -// NOTE: Keep in sync with bevy_pbr/src/light.rs -fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { - var z_slice: u32 = 0u; - if (is_orthographic) { - // NOTE: view_z is correct in the orthographic case - z_slice = u32(floor((view_z - lights.cluster_factors.z) * lights.cluster_factors.w)); - } else { - // NOTE: had to use -view_z to make it positive else log(negative) is nan - z_slice = u32(log(-view_z) * lights.cluster_factors.z - lights.cluster_factors.w + 1.0); - } - // NOTE: We use min as we may limit the far z plane used for clustering to be closeer than - // the furthest thing being drawn. This means that we need to limit to the maximum cluster. - return min(z_slice, lights.cluster_dimensions.z - 1u); -} - -fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { - let xy = vec2(floor(frag_coord * lights.cluster_factors.xy)); - let z_slice = view_z_to_z_slice(view_z, is_orthographic); - // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer - // arrays based on the cluster index. - return min( - (xy.y * lights.cluster_dimensions.x + xy.x) * lights.cluster_dimensions.z + z_slice, - lights.cluster_dimensions.w - 1u - ); -} - -// this must match CLUSTER_COUNT_SIZE in light.rs -let CLUSTER_COUNT_SIZE = 9u; -fn unpack_offset_and_counts(cluster_index: u32) -> vec3 { -#ifdef NO_STORAGE_BUFFERS_SUPPORT - let offset_and_counts = cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; - // [ 31 .. 18 | 17 .. 9 | 8 .. 
0 ] - // [ offset | point light count | spotlight count ] - return vec3( - (offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u), - (offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u), - offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u), - ); -#else - return cluster_offsets_and_counts.data[cluster_index].xyz; -#endif -} - -fn get_light_id(index: u32) -> u32 { -#ifdef NO_STORAGE_BUFFERS_SUPPORT - // The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32 - // This means the index into cluster_light_index_lists is index / 4 - let indices = cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)]; - // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index - return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); -#else - return cluster_light_index_lists.data[index]; -#endif -} - -fn point_light( - world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, - R: vec3, F0: vec3, diffuseColor: vec3 -) -> vec3 { - let light_to_frag = light.position_radius.xyz - world_position.xyz; - let distance_square = dot(light_to_frag, light_to_frag); - let rangeAttenuation = - getDistanceAttenuation(distance_square, light.color_inverse_square_range.w); - - // Specular. - // Representative Point Area Lights. - // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 - let a = roughness; - let centerToRay = dot(light_to_frag, R) * R - light_to_frag; - let closestPoint = light_to_frag + centerToRay * saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))); - let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); - let normalizationFactor = a / saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse)); - let specularIntensity = normalizationFactor * normalizationFactor; - - var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? - var H: vec3 = normalize(L + V); - var NoL: f32 = saturate(dot(N, L)); - var NoH: f32 = saturate(dot(N, H)); - var LoH: f32 = saturate(dot(L, H)); - - let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity); - - // Diffuse. - // Comes after specular since its NoL is used in the lighting equation. 
- L = normalize(light_to_frag); - H = normalize(L + V); - NoL = saturate(dot(N, L)); - NoH = saturate(dot(N, H)); - LoH = saturate(dot(L, H)); - - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); - - // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation - // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ - // where - // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color - // Φ is luminous power in lumens - // our rangeAttentuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius - - // For a point light, luminous intensity, I, in lumens per steradian is given by: - // I = Φ / 4 π - // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower - - // NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU - - // TODO compensate for energy loss https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance - - return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL); -} - -fn spot_light( - world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, - R: vec3, F0: vec3, diffuseColor: vec3 -) -> vec3 { - // reuse the point light calculations - let point = point_light(world_position, light, roughness, NdotV, N, V, R, F0, diffuseColor); - - // reconstruct spot dir from x/z and y-direction flag - var spot_dir = vec3(light.light_custom_data.x, 0.0, light.light_custom_data.y); - spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z); - if ((light.flags & POINT_LIGHT_FLAGS_SPOTLIGHT_Y_NEGATIVE) != 0u) { - spot_dir.y = -spot_dir.y; - } - let light_to_frag = light.position_radius.xyz - world_position.xyz; - - // calculate attenuation based on filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight - // spot_scale and spot_offset have been precomputed - // note we normalize here to get "l" from the filament listing. 
spot_dir is already normalized - let cd = dot(-spot_dir, normalize(light_to_frag)); - let attenuation = saturate(cd * light.light_custom_data.z + light.light_custom_data.w); - let spot_attenuation = attenuation * attenuation; - - return point * spot_attenuation; -} - -fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3, view: vec3, R: vec3, F0: vec3, diffuseColor: vec3) -> vec3 { - let incident_light = light.direction_to_light.xyz; - - let half_vector = normalize(incident_light + view); - let NoL = saturate(dot(normal, incident_light)); - let NoH = saturate(dot(normal, half_vector)); - let LoH = saturate(dot(incident_light, half_vector)); - - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); - let specularIntensity = 1.0; - let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity); - - return (specular_light + diffuse) * light.color.rgb * NoL; -} - -fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = point_lights.data[light_id]; - - let surface_to_light = light.position_radius.xyz - frag_position.xyz; - - // because the shadow maps align with the axes and the frustum planes are at 45 degrees - // we can get the worldspace depth by taking the largest absolute axis - let surface_to_light_abs = abs(surface_to_light); - let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); - - // The normal bias here is already scaled by the texel size at 1 world unit from the light. - // The texel size increases proportionally with distance from the light so multiplying by - // distance to light scales the normal bias to the texel size at the fragment distance. - let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz; - let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz); - let offset_position = frag_position.xyz + normal_offset + depth_offset; - - // similar largest-absolute-axis trick as above, but now with the offset fragment position - let frag_ls = light.position_radius.xyz - offset_position.xyz; - let abs_position_ls = abs(frag_ls); - let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); - - // NOTE: These simplifications come from multiplying: - // projection * vec4(0, 0, -major_axis_magnitude, 1.0) - // and keeping only the terms that have any impact on the depth. - // Projection-agnostic approach: - let zw = -major_axis_magnitude * light.light_custom_data.xy + light.light_custom_data.zw; - let depth = zw.x / zw.y; - - // do the lookup, using HW PCF and comparison - // NOTE: Due to the non-uniform control flow above, we must use the Level variant of - // textureSampleCompare to avoid undefined behaviour due to some of the fragments in - // a quad (2x2 fragments) being processed not being sampled, and this messing with - // mip-mapping functionality. The shadow maps have no mipmaps so Level just samples - // from LOD 0. 
-#ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompare(point_shadow_textures, point_shadow_textures_sampler, frag_ls, depth); -#else - return textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(light_id), depth); -#endif -} - -fn fetch_spot_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = point_lights.data[light_id]; - - let surface_to_light = light.position_radius.xyz - frag_position.xyz; - - // construct the light view matrix - var spot_dir = vec3(light.light_custom_data.x, 0.0, light.light_custom_data.y); - // reconstruct spot dir from x/z and y-direction flag - spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z); - if ((light.flags & POINT_LIGHT_FLAGS_SPOTLIGHT_Y_NEGATIVE) != 0u) { - spot_dir.y = -spot_dir.y; - } - - // view matrix z_axis is the reverse of transform.forward() - let fwd = -spot_dir; - let distance_to_light = dot(fwd, surface_to_light); - let offset_position = - -surface_to_light - + (light.shadow_depth_bias * normalize(surface_to_light)) - + (surface_normal.xyz * light.shadow_normal_bias) * distance_to_light; - - // the construction of the up and right vectors needs to precisely mirror the code - // in render/light.rs:spotlight_view_matrix - var sign = -1.0; - if (fwd.z >= 0.0) { - sign = 1.0; - } - let a = -1.0 / (fwd.z + sign); - let b = fwd.x * fwd.y * a; - let up_dir = vec3(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x); - let right_dir = vec3(-b, -sign - fwd.y * fwd.y * a, fwd.y); - let light_inv_rot = mat3x3(right_dir, up_dir, fwd); - - // because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate - // the product of the transpose with a vector we can just post-multiply instead of pre-multplying. - // this allows us to keep the matrix construction code identical between CPU and GPU. - let projected_position = offset_position * light_inv_rot; - - // divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w) - // to get ndc coordinates - let f_div_minus_z = 1.0 / (light.spotlight_tan_angle * -projected_position.z); - let shadow_xy_ndc = projected_position.xy * f_div_minus_z; - // convert to uv coordinates - let shadow_uv = shadow_xy_ndc * vec2(0.5, -0.5) + vec2(0.5, 0.5); - - // 0.1 must match POINT_LIGHT_NEAR_Z - let depth = 0.1 / -projected_position.z; - - #ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompare(directional_shadow_textures, directional_shadow_textures_sampler, - shadow_uv, depth); - #else - return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, - shadow_uv, i32(light_id) + lights.spotlight_shadowmap_offset, depth); - #endif -} - -fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = lights.directional_lights[light_id]; - - // The normal bias is scaled to the texel size. 
- let normal_offset = light.shadow_normal_bias * surface_normal.xyz; - let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz; - let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); - - let offset_position_clip = light.view_projection * offset_position; - if (offset_position_clip.w <= 0.0) { - return 1.0; - } - let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; - // No shadow outside the orthographic projection volume - if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 - || any(offset_position_ndc > vec3(1.0))) { - return 1.0; - } - - // compute texture coordinates for shadow lookup, compensating for the Y-flip difference - // between the NDC and texture coordinates - let flip_correction = vec2(0.5, -0.5); - let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); - - let depth = offset_position_ndc.z; - // do the lookup, using HW PCF and comparison - // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture - // sampler to avoid use of implicit derivatives causing possible undefined behavior. -#ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, depth); -#else - return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, i32(light_id), depth); -#endif -} - -fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3 { - let rgb = clamp( - abs( - ((hue * 6.0 + vec3(0.0, 4.0, 2.0)) % 6.0) - 3.0 - ) - 1.0, - vec3(0.0), - vec3(1.0) - ); - - return value * mix( vec3(1.0), rgb, vec3(saturation)); -} - -fn random1D(s: f32) -> f32 { - return fract(sin(s * 12.9898) * 43758.5453123); -} +#import bevy_pbr::utils +#import bevy_pbr::clustered_forward +#import bevy_pbr::lighting +#import bevy_pbr::shadows +#import bevy_pbr::pbr_functions struct FragmentInput { [[builtin(front_facing)]] is_front: bool; @@ -551,28 +19,37 @@ struct FragmentInput { #endif #ifdef VERTEX_COLORS [[location(4)]] color: vec4; -#endif +#endif }; [[stage(fragment)]] fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { var output_color: vec4 = material.base_color; - #ifdef VERTEX_COLORS +#ifdef VERTEX_COLORS output_color = output_color * in.color; - #endif +#endif if ((material.flags & STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { output_color = output_color * textureSample(base_color_texture, base_color_sampler, in.uv); } - // // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit if ((material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_input: PbrInput; + + pbr_input.material.base_color = output_color; + pbr_input.material.reflectance = material.reflectance; + pbr_input.material.flags = material.flags; + pbr_input.material.alpha_cutoff = material.alpha_cutoff; + // TODO use .a for exposure compensation in HDR var emissive: vec4 = material.emissive; if ((material.flags & STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { emissive = vec4(emissive.rgb * textureSample(emissive_texture, emissive_sampler, in.uv).rgb, 1.0); } + pbr_input.material.emissive = emissive; - // calculate non-linear roughness from linear perceptualRoughness var metallic: f32 = material.metallic; var 
perceptual_roughness: f32 = material.perceptual_roughness; if ((material.flags & STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { @@ -581,192 +58,34 @@ fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { metallic = metallic * metallic_roughness.b; perceptual_roughness = perceptual_roughness * metallic_roughness.g; } - let roughness = perceptualRoughnessToRoughness(perceptual_roughness); + pbr_input.material.metallic = metallic; + pbr_input.material.perceptual_roughness = perceptual_roughness; var occlusion: f32 = 1.0; if ((material.flags & STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { occlusion = textureSample(occlusion_texture, occlusion_sampler, in.uv).r; } + pbr_input.occlusion = occlusion; - var N: vec3 = normalize(in.world_normal); + pbr_input.frag_coord = in.frag_coord; + pbr_input.world_position = in.world_position; + pbr_input.world_normal = in.world_normal; -#ifdef VERTEX_TANGENTS -#ifdef STANDARDMATERIAL_NORMAL_MAP - var T: vec3 = normalize(in.world_tangent.xyz - N * dot(in.world_tangent.xyz, N)); - var B: vec3 = cross(N, T) * in.world_tangent.w; -#endif -#endif + pbr_input.is_orthographic = view.projection[3].w == 1.0; - if ((material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) { - if (!in.is_front) { - N = -N; + pbr_input.N = prepare_normal( + in.world_normal, #ifdef VERTEX_TANGENTS #ifdef STANDARDMATERIAL_NORMAL_MAP - T = -T; - B = -B; + in.world_tangent, #endif #endif - } - } - -#ifdef VERTEX_TANGENTS -#ifdef STANDARDMATERIAL_NORMAL_MAP - let TBN = mat3x3(T, B, N); - // Nt is the tangent-space normal. - var Nt: vec3; - if ((material.flags & STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u) { - // Only use the xy components and derive z for 2-component normal maps. - Nt = vec3(textureSample(normal_map_texture, normal_map_sampler, in.uv).rg * 2.0 - 1.0, 0.0); - Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y); - } else { - Nt = textureSample(normal_map_texture, normal_map_sampler, in.uv).rgb * 2.0 - 1.0; - } - // Normal maps authored for DirectX require flipping the y component - if ((material.flags & STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u) { - Nt.y = -Nt.y; - } - N = normalize(TBN * Nt); -#endif -#endif - - if ((material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) { - // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 - output_color.a = 1.0; - } else if ((material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) { - if (output_color.a >= material.alpha_cutoff) { - // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque - output_color.a = 1.0; - } else { - // NOTE: output_color.a < material.alpha_cutoff should not is not rendered - // NOTE: This and any other discards mean that early-z testing cannot be done! 
- discard; - } - } - - var V: vec3; - // If the projection is not orthographic - let is_orthographic = view.projection[3].w == 1.0; - if (is_orthographic) { - // Orthographic view vector - V = normalize(vec3(view.view_proj[0].z, view.view_proj[1].z, view.view_proj[2].z)); - } else { - // Only valid for a perpective projection - V = normalize(view.world_position.xyz - in.world_position.xyz); - } - - // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" - let NdotV = max(dot(N, V), 0.0001); - - // Remapping [0,1] reflectance to F0 - // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping - let reflectance = material.reflectance; - let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic; - - // Diffuse strength inversely related to metallicity - let diffuse_color = output_color.rgb * (1.0 - metallic); - - let R = reflect(-V, N); - - // accumulate color - var light_accum: vec3 = vec3(0.0); - - let view_z = dot(vec4( - view.inverse_view[0].z, - view.inverse_view[1].z, - view.inverse_view[2].z, - view.inverse_view[3].z - ), in.world_position); - let cluster_index = fragment_cluster_index(in.frag_coord.xy, view_z, is_orthographic); - let offset_and_counts = unpack_offset_and_counts(cluster_index); - - // point lights - for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) { - let light_id = get_light_id(i); - let light = point_lights.data[light_id]; - var shadow: f32 = 1.0; - if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = fetch_point_shadow(light_id, in.world_position, in.world_normal); - } - let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, N, V, R, F0, diffuse_color); - light_accum = light_accum + light_contrib * shadow; - } - - // spotlights - for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) { - let light_id = get_light_id(i); - let light = point_lights.data[light_id]; - var shadow: f32 = 1.0; - if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = fetch_spot_shadow(light_id, in.world_position, in.world_normal); - } - let light_contrib = spot_light(in.world_position.xyz, light, roughness, NdotV, N, V, R, F0, diffuse_color); - light_accum = light_accum + light_contrib * shadow; - } - - // directional lights - let n_directional_lights = lights.n_directional_lights; - for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { - let light = lights.directional_lights[i]; - var shadow: f32 = 1.0; - if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (light.flags & DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = fetch_directional_shadow(i, in.world_position, in.world_normal); - } - let light_contrib = directional_light(light, roughness, NdotV, N, V, R, F0, diffuse_color); - light_accum = light_accum + light_contrib * shadow; - } - - let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV); - let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV); - - output_color = vec4( - light_accum + - (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion + - emissive.rgb * output_color.a, - output_color.a); - - // Cluster allocation debug (using 'over' alpha blending) -#ifdef 
CLUSTERED_FORWARD_DEBUG_Z_SLICES - // NOTE: This debug mode visualises the z-slices - let cluster_overlay_alpha = 0.1; - var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); - // A hack to make the colors alternate a bit more - if ((z_slice & 1u) == 1u) { - z_slice = z_slice + lights.cluster_dimensions.z / 2u; - } - let slice_color = hsv2rgb(f32(z_slice) / f32(lights.cluster_dimensions.z + 1u), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, - output_color.a - ); -#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY - // NOTE: This debug mode visualises the number of lights within the cluster that contains - // the fragment. It shows a sort of lighting complexity measure. - let cluster_overlay_alpha = 0.1; - let max_light_complexity_per_cluster = 64.0; - output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r - + cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count[1])); - output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g - + cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count[1]))); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY - // NOTE: Visualizes the cluster to which the fragment belongs - let cluster_overlay_alpha = 0.1; - let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, - output_color.a + in.uv, + in.is_front, ); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + pbr_input.V = calculate_view(in.world_position, pbr_input.is_orthographic); - // tone_mapping - output_color = vec4(reinhard_luminance(output_color.rgb), output_color.a); - // Gamma correction. - // Not needed with sRGB buffer - // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2)); + output_color = pbr(pbr_input); } return output_color; diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl new file mode 100644 index 0000000000000..4c9205461ce11 --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -0,0 +1,26 @@ +#define_import_path bevy_pbr::pbr_bindings + +#import bevy_pbr::pbr_types + +[[group(1), binding(0)]] +var material: StandardMaterial; +[[group(1), binding(1)]] +var base_color_texture: texture_2d; +[[group(1), binding(2)]] +var base_color_sampler: sampler; +[[group(1), binding(3)]] +var emissive_texture: texture_2d; +[[group(1), binding(4)]] +var emissive_sampler: sampler; +[[group(1), binding(5)]] +var metallic_roughness_texture: texture_2d; +[[group(1), binding(6)]] +var metallic_roughness_sampler: sampler; +[[group(1), binding(7)]] +var occlusion_texture: texture_2d; +[[group(1), binding(8)]] +var occlusion_sampler: sampler; +[[group(1), binding(9)]] +var normal_map_texture: texture_2d; +[[group(1), binding(10)]] +var normal_map_sampler: sampler; diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl new file mode 100644 index 0000000000000..a2349fcbdeee3 --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -0,0 +1,196 @@ +#define_import_path bevy_pbr::pbr_functions + +// NOTE: This ensures that the world_normal is normalized and if +// vertex tangents and normal maps then normal mapping may be applied. 
+fn prepare_normal(
+    world_normal: vec3<f32>,
+#ifdef VERTEX_TANGENTS
+#ifdef STANDARDMATERIAL_NORMAL_MAP
+    world_tangent: vec4<f32>,
+#endif
+#endif
+    uv: vec2<f32>,
+    is_front: bool,
+) -> vec3<f32> {
+    var N: vec3<f32> = normalize(world_normal);
+
+#ifdef VERTEX_TANGENTS
+#ifdef STANDARDMATERIAL_NORMAL_MAP
+    // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be
+    // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the
+    // vertex tangent! Do not change this code unless you really know what you are doing.
+    // http://www.mikktspace.com/
+    var T: vec3<f32> = world_tangent.xyz;
+    var B: vec3<f32> = world_tangent.w * cross(N, T);
+#endif
+#endif
+
+    if ((material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
+        if (!is_front) {
+            N = -N;
+#ifdef VERTEX_TANGENTS
+#ifdef STANDARDMATERIAL_NORMAL_MAP
+            T = -T;
+            B = -B;
+#endif
+#endif
+        }
+    }
+
+#ifdef VERTEX_TANGENTS
+#ifdef STANDARDMATERIAL_NORMAL_MAP
+    // Nt is the tangent-space normal.
+    var Nt: vec3<f32>;
+    if ((material.flags & STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u) {
+        // Only use the xy components and derive z for 2-component normal maps.
+        Nt = vec3<f32>(textureSample(normal_map_texture, normal_map_sampler, uv).rg * 2.0 - 1.0, 0.0);
+        Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y);
+    } else {
+        Nt = textureSample(normal_map_texture, normal_map_sampler, uv).rgb * 2.0 - 1.0;
+    }
+    // Normal maps authored for DirectX require flipping the y component
+    if ((material.flags & STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u) {
+        Nt.y = -Nt.y;
+    }
+    // NOTE: The mikktspace method of normal mapping maps the tangent-space normal from
+    // the normal map texture in this way to be an EXACT inverse of how the normal map baker
+    // calculates the normal maps so there is no error introduced. Do not change this code
+    // unless you really know what you are doing.
+    // http://www.mikktspace.com/
+    N = normalize(Nt.x * T + Nt.y * B + Nt.z * N);
+#endif
+#endif
+
+    return N;
+}
+
+// NOTE: Correctly calculates the view vector depending on whether
+// the projection is orthographic or perspective.
+fn calculate_view(
+    world_position: vec4<f32>,
+    is_orthographic: bool,
+) -> vec3<f32> {
+    var V: vec3<f32>;
+    if (is_orthographic) {
+        // Orthographic view vector
+        V = normalize(vec3<f32>(view.view_proj[0].z, view.view_proj[1].z, view.view_proj[2].z));
+    } else {
+        // Only valid for a perspective projection
+        V = normalize(view.world_position.xyz - world_position.xyz);
+    }
+    return V;
+}
+
+struct PbrInput {
+    material: StandardMaterial;
+    occlusion: f32;
+    frag_coord: vec4<f32>;
+    world_position: vec4<f32>;
+    world_normal: vec3<f32>;
+    N: vec3<f32>;
+    V: vec3<f32>;
+    is_orthographic: bool;
+};
+
+fn pbr(
+    in: PbrInput,
+) -> vec4<f32> {
+    var output_color: vec4<f32> = in.material.base_color;
+
+    // TODO use .a for exposure compensation in HDR
+    let emissive = in.material.emissive;
+
+    // calculate non-linear roughness from linear perceptualRoughness
+    let metallic = in.material.metallic;
+    let perceptual_roughness = in.material.perceptual_roughness;
+    let roughness = perceptualRoughnessToRoughness(perceptual_roughness);
+
+    let occlusion = in.occlusion;
+
+    if ((in.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
+        // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0
+        output_color.a = 1.0;
+    } else if ((in.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
+        if (output_color.a >= in.material.alpha_cutoff) {
+            // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque
+            output_color.a = 1.0;
+        } else {
+            // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered
+            // NOTE: This and any other discards mean that early-z testing cannot be done!
+            discard;
+        }
+    }
+
+    // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
+    let NdotV = max(dot(in.N, in.V), 0.0001);
+
+    // Remapping [0,1] reflectance to F0
+    // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping
+    let reflectance = in.material.reflectance;
+    let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic;
+
+    // Diffuse strength inversely related to metallicity
+    let diffuse_color = output_color.rgb * (1.0 - metallic);
+
+    let R = reflect(-in.V, in.N);
+
+    // accumulate color
+    var light_accum: vec3<f32> = vec3<f32>(0.0);
+
+    let view_z = dot(vec4<f32>(
+        view.inverse_view[0].z,
+        view.inverse_view[1].z,
+        view.inverse_view[2].z,
+        view.inverse_view[3].z
+    ), in.world_position);
+    let cluster_index = fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic);
+    let offset_and_count = unpack_offset_and_count(cluster_index);
+    for (var i: u32 = offset_and_count[0]; i < offset_and_count[0] + offset_and_count[1]; i = i + 1u) {
+        let light_id = get_light_id(i);
+        let light = point_lights.data[light_id];
+        var shadow: f32 = 1.0;
+        if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
+                && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
+            shadow = fetch_point_shadow(light_id, in.world_position, in.world_normal);
+        }
+        let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
+        light_accum = light_accum + light_contrib * shadow;
+    }
+
+    let n_directional_lights = lights.n_directional_lights;
+    for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) {
+        let light = lights.directional_lights[i];
+        var shadow: f32 = 1.0;
+        if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
+                && (light.flags & DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
+            shadow = fetch_directional_shadow(i, in.world_position,
in.world_normal); + } + let light_contrib = directional_light(light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color); + light_accum = light_accum + light_contrib * shadow; + } + + let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV); + let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV); + + output_color = vec4( + light_accum + + (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion + + emissive.rgb * output_color.a, + output_color.a); + + output_color = cluster_debug_visualization( + output_color, + view_z, + in.is_orthographic, + offset_and_count, + cluster_index, + ); + + // tone_mapping + output_color = vec4(reinhard_luminance(output_color.rgb), output_color.a); + // Gamma correction. + // Not needed with sRGB buffer + // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2)); + + return output_color; +} diff --git a/crates/bevy_pbr/src/render/pbr_lighting.wgsl b/crates/bevy_pbr/src/render/pbr_lighting.wgsl new file mode 100644 index 0000000000000..79818bc837126 --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_lighting.wgsl @@ -0,0 +1,255 @@ +#define_import_path bevy_pbr::lighting + +// From the Filament design doc +// https://google.github.io/filament/Filament.html#table_symbols +// Symbol Definition +// v View unit vector +// l Incident light unit vector +// n Surface normal unit vector +// h Half unit vector between l and v +// f BRDF +// f_d Diffuse component of a BRDF +// f_r Specular component of a BRDF +// α Roughness, remapped from using input perceptualRoughness +// σ Diffuse reflectance +// Ω Spherical domain +// f0 Reflectance at normal incidence +// f90 Reflectance at grazing angle +// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) +// nior Index of refraction (IOR) of an interface +// ⟨n⋅l⟩ Dot product clamped to [0..1] +// ⟨a⟩ Saturated value (clamped to [0..1]) + +// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material +// and consists of two components, the diffuse component (f_d) and the specular component (f_r): +// f(v,l) = f_d(v,l) + f_r(v,l) +// +// The form of the microfacet model is the same for diffuse and specular +// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm +// +// In which: +// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets +// G models the visibility (or occlusion or shadow-masking) of the microfacets +// f_m is the microfacet BRDF and differs between specular and diffuse components +// +// The above integration needs to be approximated. 
+ +// distanceAttenuation is simply the square falloff of light intensity +// combined with a smooth attenuation at the edge of the light radius +// +// light radius is a non-physical construct for efficiency purposes, +// because otherwise every light affects every fragment in the scene +fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { + let factor = distanceSquare * inverseRangeSquared; + let smoothFactor = saturate(1.0 - factor * factor); + let attenuation = smoothFactor * smoothFactor; + return attenuation * 1.0 / max(distanceSquare, 0.0001); +} + +// Normal distribution function (specular D) +// Based on https://google.github.io/filament/Filament.html#citation-walter07 + +// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } + +// Simple implementation, has precision problems when using fp16 instead of fp32 +// see https://google.github.io/filament/Filament.html#listing_speculardfp16 +fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { + let oneMinusNoHSquared = 1.0 - NoH * NoH; + let a = NoH * roughness; + let k = roughness / (oneMinusNoHSquared + a * a); + let d = k * k * (1.0 / PI); + return d; +} + +// Visibility function (Specular G) +// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } +// such that f_r becomes +// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) +// where +// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } +// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv +fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { + let a2 = roughness * roughness; + let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); + let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); + let v = 0.5 / (lambdaV + lambdaL); + return v; +} + +// Fresnel function +// see https://google.github.io/filament/Filament.html#citation-schlick94 +// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 +fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +} + +fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +} + +fn fresnel(f0: vec3, LoH: f32) -> vec3 { + // f_90 suitable for ambient occlusion + // see https://google.github.io/filament/Filament.html#lighting/occlusion + let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); + return F_Schlick_vec(f0, f90, LoH); +} + +// Specular BRDF +// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf + +// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m +// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } +fn specular(f0: vec3, roughness: f32, h: vec3, NoV: f32, NoL: f32, + NoH: f32, LoH: f32, specularIntensity: f32) -> vec3 { + let D = D_GGX(roughness, NoH, h); + let V = V_SmithGGXCorrelated(roughness, NoV, NoL); + let F = fresnel(f0, LoH); + + return (specularIntensity * D * V) * F; +} + +// Diffuse BRDF +// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf +// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm +// +// simplest approximation +// float Fd_Lambert() { +// return 1.0 / PI; +// } +// +// vec3 Fd = diffuseColor * Fd_Lambert(); +// +// Disney approximation +// See https://google.github.io/filament/Filament.html#citation-burley12 +// minimal quality difference 
+fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { + let f90 = 0.5 + 2.0 * roughness * LoH * LoH; + let lightScatter = F_Schlick(1.0, f90, NoL); + let viewScatter = F_Schlick(1.0, f90, NoV); + return lightScatter * viewScatter * (1.0 / PI); +} + +// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile +fn EnvBRDFApprox(f0: vec3, perceptual_roughness: f32, NoV: f32) -> vec3 { + let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); + let c1 = vec4(1.0, 0.0425, 1.04, -0.04); + let r = perceptual_roughness * c0 + c1; + let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; + let AB = vec2(-1.04, 1.04) * a004 + r.zw; + return f0 * AB.x + AB.y; +} + +fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { + // clamp perceptual roughness to prevent precision problems + // According to Filament design 0.089 is recommended for mobile + // Filament uses 0.045 for non-mobile + let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); + return clampedPerceptualRoughness * clampedPerceptualRoughness; +} + +// from https://64.github.io/tonemapping/ +// reinhard on RGB oversaturates colors +fn reinhard(color: vec3) -> vec3 { + return color / (1.0 + color); +} + +fn reinhard_extended(color: vec3, max_white: f32) -> vec3 { + let numerator = color * (1.0 + (color / vec3(max_white * max_white))); + return numerator / (1.0 + color); +} + +// luminance coefficients from Rec. 709. +// https://en.wikipedia.org/wiki/Rec._709 +fn luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn change_luminance(c_in: vec3, l_out: f32) -> vec3 { + let l_in = luminance(c_in); + return c_in * (l_out / l_in); +} + +fn reinhard_luminance(color: vec3) -> vec3 { + let l_old = luminance(color); + let l_new = l_old / (1.0 + l_old); + return change_luminance(color, l_new); +} + +fn reinhard_extended_luminance(color: vec3, max_white_l: f32) -> vec3 { + let l_old = luminance(color); + let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l))); + let l_new = numerator / (1.0 + l_old); + return change_luminance(color, l_new); +} + +fn point_light( + world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, + R: vec3, F0: vec3, diffuseColor: vec3 +) -> vec3 { + let light_to_frag = light.position_radius.xyz - world_position.xyz; + let distance_square = dot(light_to_frag, light_to_frag); + let rangeAttenuation = + getDistanceAttenuation(distance_square, light.color_inverse_square_range.w); + + // Specular. + // Representative Point Area Lights. + // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 + let a = roughness; + let centerToRay = dot(light_to_frag, R) * R - light_to_frag; + let closestPoint = light_to_frag + centerToRay * saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))); + let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); + let normalizationFactor = a / saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse)); + let specularIntensity = normalizationFactor * normalizationFactor; + + var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? + var H: vec3 = normalize(L + V); + var NoL: f32 = saturate(dot(N, L)); + var NoH: f32 = saturate(dot(N, H)); + var LoH: f32 = saturate(dot(L, H)); + + let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity); + + // Diffuse. + // Comes after specular since its NoL is used in the lighting equation. 
+    L = normalize(light_to_frag);
+    H = normalize(L + V);
+    NoL = saturate(dot(N, L));
+    NoH = saturate(dot(N, H));
+    LoH = saturate(dot(L, H));
+
+    let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
+
+    // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation
+    // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩
+    // where
+    // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color
+    // Φ is luminous power in lumens
+    // our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius
+
+    // For a point light, luminous intensity, I, in lumens per steradian is given by:
+    // I = Φ / 4 π
+    // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower
+
+    // NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU
+
+    // TODO compensate for energy loss https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance
+
+    return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL);
+}
+
+fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3<f32>, view: vec3<f32>, R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {
+    let incident_light = light.direction_to_light.xyz;
+
+    let half_vector = normalize(incident_light + view);
+    let NoL = saturate(dot(normal, incident_light));
+    let NoH = saturate(dot(normal, half_vector));
+    let LoH = saturate(dot(incident_light, half_vector));
+
+    let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
+    let specularIntensity = 1.0;
+    let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity);
+
+    return (specular_light + diffuse) * light.color.rgb * NoL;
+}
diff --git a/crates/bevy_pbr/src/render/pbr_types.wgsl b/crates/bevy_pbr/src/render/pbr_types.wgsl
new file mode 100644
index 0000000000000..6927424fb4b67
--- /dev/null
+++ b/crates/bevy_pbr/src/render/pbr_types.wgsl
@@ -0,0 +1,24 @@
+#define_import_path bevy_pbr::pbr_types
+
+struct StandardMaterial {
+    base_color: vec4<f32>;
+    emissive: vec4<f32>;
+    perceptual_roughness: f32;
+    metallic: f32;
+    reflectance: f32;
+    // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
+ flags: u32; + alpha_cutoff: f32; +}; + +let STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; +let STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; +let STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; +let STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; +let STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; +let STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u; +let STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u; +let STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u; diff --git a/crates/bevy_pbr/src/render/shadows.wgsl b/crates/bevy_pbr/src/render/shadows.wgsl new file mode 100644 index 0000000000000..9cc2c7e84d928 --- /dev/null +++ b/crates/bevy_pbr/src/render/shadows.wgsl @@ -0,0 +1,77 @@ +#define_import_path bevy_pbr::shadows + +fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = point_lights.data[light_id]; + + // because the shadow maps align with the axes and the frustum planes are at 45 degrees + // we can get the worldspace depth by taking the largest absolute axis + let surface_to_light = light.position_radius.xyz - frag_position.xyz; + let surface_to_light_abs = abs(surface_to_light); + let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); + + // The normal bias here is already scaled by the texel size at 1 world unit from the light. + // The texel size increases proportionally with distance from the light so multiplying by + // distance to light scales the normal bias to the texel size at the fragment distance. + let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz; + let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz); + let offset_position = frag_position.xyz + normal_offset + depth_offset; + + // similar largest-absolute-axis trick as above, but now with the offset fragment position + let frag_ls = light.position_radius.xyz - offset_position.xyz; + let abs_position_ls = abs(frag_ls); + let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); + + // NOTE: These simplifications come from multiplying: + // projection * vec4(0, 0, -major_axis_magnitude, 1.0) + // and keeping only the terms that have any impact on the depth. + // Projection-agnostic approach: + let zw = -major_axis_magnitude * light.projection_lr.xy + light.projection_lr.zw; + let depth = zw.x / zw.y; + + // do the lookup, using HW PCF and comparison + // NOTE: Due to the non-uniform control flow above, we must use the Level variant of + // textureSampleCompare to avoid undefined behaviour due to some of the fragments in + // a quad (2x2 fragments) being processed not being sampled, and this messing with + // mip-mapping functionality. The shadow maps have no mipmaps so Level just samples + // from LOD 0. +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare(point_shadow_textures, point_shadow_textures_sampler, frag_ls, depth); +#else + return textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(light_id), depth); +#endif +} + +fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = lights.directional_lights[light_id]; + + // The normal bias is scaled to the texel size. 
+ let normal_offset = light.shadow_normal_bias * surface_normal.xyz; + let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz; + let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); + + let offset_position_clip = light.view_projection * offset_position; + if (offset_position_clip.w <= 0.0) { + return 1.0; + } + let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; + // No shadow outside the orthographic projection volume + if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 + || any(offset_position_ndc > vec3(1.0))) { + return 1.0; + } + + // compute texture coordinates for shadow lookup, compensating for the Y-flip difference + // between the NDC and texture coordinates + let flip_correction = vec2(0.5, -0.5); + let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); + + let depth = offset_position_ndc.z; + // do the lookup, using HW PCF and comparison + // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture + // sampler to avoid use of implicit derivatives causing possible undefined behavior. +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, depth); +#else + return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, i32(light_id), depth); +#endif +} diff --git a/crates/bevy_pbr/src/render/skinning.wgsl b/crates/bevy_pbr/src/render/skinning.wgsl index 56b35634a09cd..3596ba4c76231 100644 --- a/crates/bevy_pbr/src/render/skinning.wgsl +++ b/crates/bevy_pbr/src/render/skinning.wgsl @@ -1,20 +1,20 @@ -// If using this WGSL snippet as an #import, a dedicated +// If using this WGSL snippet as an #import, a dedicated // "joint_matricies" uniform of type SkinnedMesh must be added in the // main shader. #define_import_path bevy_pbr::skinning -/// HACK: This works around naga not supporting matrix addition in SPIR-V +/// HACK: This works around naga not supporting matrix addition in SPIR-V // translations. 
See https://github.com/gfx-rs/naga/issues/1527 fn add_matrix( a: mat4x4, b: mat4x4, ) -> mat4x4 { return mat4x4( - a.x + b.x, - a.y + b.y, - a.z + b.z, - a.w + b.w, + a[0] + b[0], + a[1] + b[1], + a[2] + b[2], + a[3] + b[3], ); } @@ -29,10 +29,10 @@ fn skin_model( } fn inverse_transpose_3x3(in: mat3x3) -> mat3x3 { - let x = cross(in.y, in.z); - let y = cross(in.z, in.x); - let z = cross(in.x, in.y); - let det = dot(in.z, z); + let x = cross(in[1], in[2]); + let y = cross(in[2], in[0]); + let z = cross(in[0], in[1]); + let det = dot(in[2], z); return mat3x3( x / det, y / det, @@ -50,17 +50,3 @@ fn skin_normals( model[2].xyz )) * normal; } - -fn skin_tangents( - model: mat4x4, - tangent: vec4, -) -> vec4 { - return vec4( - mat3x3( - model[0].xyz, - model[1].xyz, - model[2].xyz - ) * tangent.xyz, - tangent.w - ); -} \ No newline at end of file diff --git a/crates/bevy_pbr/src/render/utils.wgsl b/crates/bevy_pbr/src/render/utils.wgsl new file mode 100644 index 0000000000000..ac13af027da7f --- /dev/null +++ b/crates/bevy_pbr/src/render/utils.wgsl @@ -0,0 +1,23 @@ +#define_import_path bevy_pbr::utils + +let PI: f32 = 3.141592653589793; + +fn saturate(value: f32) -> f32 { + return clamp(value, 0.0, 1.0); +} + +fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3 { + let rgb = clamp( + abs( + ((hue * 6.0 + vec3(0.0, 4.0, 2.0)) % 6.0) - 3.0 + ) - 1.0, + vec3(0.0), + vec3(1.0) + ); + + return value * mix( vec3(1.0), rgb, vec3(saturation)); +} + +fn random1D(s: f32) -> f32 { + return fract(sin(s * 12.9898) * 43758.5453123); +} diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index 39c5f443854a6..b3dc23c86fa27 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,5 +1,17 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_view_bindings + +[[group(1), binding(0)]] +var mesh: Mesh; + +#ifdef SKINNED +[[group(1), binding(1)]] +var joint_matrices: SkinnedMesh; +#import bevy_pbr::skinning +#endif + +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions struct Vertex { [[location(0)]] position: vec3; @@ -9,19 +21,10 @@ struct Vertex { #endif }; -[[group(1), binding(0)]] -var mesh: Mesh; - struct VertexOutput { [[builtin(position)]] clip_position: vec4; }; -#ifdef SKINNED -[[group(1), binding(1)]] -var joint_matrices: SkinnedMesh; -#import bevy_pbr::skinning -#endif - [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { #ifdef SKINNED @@ -30,10 +33,8 @@ fn vertex(vertex: Vertex) -> VertexOutput { let model = mesh.model; #endif - let world_position = model * vec4(vertex.position, 1.0); var out: VertexOutput; - out.clip_position = view.view_proj * world_position; - + out.clip_position = mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); return out; } diff --git a/crates/bevy_pbr/src/wireframe.rs b/crates/bevy_pbr/src/wireframe.rs index b8116c6f1fe6d..6a7a4c3c297c2 100644 --- a/crates/bevy_pbr/src/wireframe.rs +++ b/crates/bevy_pbr/src/wireframe.rs @@ -2,11 +2,12 @@ use crate::MeshPipeline; use crate::{DrawMesh, MeshPipelineKey, MeshUniform, SetMeshBindGroup, SetMeshViewBindGroup}; use bevy_app::Plugin; use bevy_asset::{load_internal_asset, Handle, HandleUntyped}; -use bevy_core_pipeline::Opaque3d; +use bevy_core_pipeline::core_3d::Opaque3d; use bevy_ecs::{prelude::*, reflect::ReflectComponent}; use bevy_reflect::std_traits::ReflectDefault; use bevy_reflect::{Reflect, TypeUuid}; use bevy_render::{ + extract_resource::{ExtractResource, ExtractResourcePlugin}, mesh::{Mesh, MeshVertexBufferLayout}, render_asset::RenderAssets, render_phase::{AddRenderCommand, DrawFunctions, RenderPhase, SetItemPipeline}, @@ -34,7 +35,8 @@ impl Plugin for WireframePlugin { Shader::from_wgsl ); - app.init_resource::(); + app.init_resource::() + .add_plugin(ExtractResourcePlugin::::default()); if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { render_app @@ -42,18 +44,11 @@ impl Plugin for WireframePlugin { .init_resource::() .init_resource::>() .add_system_to_stage(RenderStage::Extract, extract_wireframes) - .add_system_to_stage(RenderStage::Extract, extract_wireframe_config) .add_system_to_stage(RenderStage::Queue, queue_wireframes); } } } -fn extract_wireframe_config(mut commands: Commands, wireframe_config: Res) { - if wireframe_config.is_added() || wireframe_config.is_changed() { - commands.insert_resource(wireframe_config.into_inner().clone()); - } -} - fn extract_wireframes(mut commands: Commands, query: Query>) { for entity in query.iter() { commands.get_or_spawn(entity).insert(Wireframe); @@ -65,7 +60,7 @@ fn extract_wireframes(mut commands: Commands, query: Query>, render_meshes: Res>, diff --git a/crates/bevy_ptr/src/lib.rs b/crates/bevy_ptr/src/lib.rs index 075751a449a62..e8173f674c47a 100644 --- a/crates/bevy_ptr/src/lib.rs +++ b/crates/bevy_ptr/src/lib.rs @@ -180,7 +180,7 @@ impl<'a> OwningPtr<'a> { /// Must point to a valid `T`. #[inline] pub unsafe fn drop_as(self) { - self.as_ptr().cast::().drop_in_place() + self.as_ptr().cast::().drop_in_place(); } /// Gets the underlying pointer, erasing the associated lifetime. 
diff --git a/crates/bevy_reflect/Cargo.toml b/crates/bevy_reflect/Cargo.toml index 8d74f36495520..e520f45b6c515 100644 --- a/crates/bevy_reflect/Cargo.toml +++ b/crates/bevy_reflect/Cargo.toml @@ -22,10 +22,10 @@ erased-serde = "0.3" downcast-rs = "1.2" parking_lot = "0.11.0" thiserror = "1.0" +once_cell = "1.11" serde = "1" smallvec = { version = "1.6", features = ["serde", "union", "const_generics"], optional = true } glam = { version = "0.20.0", features = ["serde"], optional = true } -hashbrown = { version = "0.11", features = ["serde"], optional = true } [dev-dependencies] ron = "0.7.0" diff --git a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml index 3663af55af8db..72991cf776e1c 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml +++ b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml @@ -17,4 +17,4 @@ bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.8.0-dev" } syn = { version = "1.0", features = ["full"] } proc-macro2 = "1.0" quote = "1.0" -uuid = { version = "0.8", features = ["v4"] } +uuid = { version = "1.1", features = ["v4"] } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/container_attributes.rs b/crates/bevy_reflect/bevy_reflect_derive/src/container_attributes.rs index 8e16e77626c5a..5f2d16c4a6af9 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/container_attributes.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/container_attributes.rs @@ -15,9 +15,13 @@ use syn::{Meta, NestedMeta, Path}; // The "special" trait idents that are used internally for reflection. // Received via attributes like `#[reflect(PartialEq, Hash, ...)]` +const DEBUG_ATTR: &str = "Debug"; const PARTIAL_EQ_ATTR: &str = "PartialEq"; const HASH_ATTR: &str = "Hash"; -const SERIALIZE_ATTR: &str = "Serialize"; + +// The traits listed below are not considered "special" (i.e. they use the `ReflectMyTrait` syntax) +// but useful to know exist nonetheless +pub(crate) const REFLECT_DEFAULT: &str = "ReflectDefault"; /// A marker for trait implementations registered via the `Reflect` derive macro. #[derive(Clone)] @@ -46,9 +50,9 @@ impl Default for TraitImpl { /// `Reflect` derive macro using the helper attribute: `#[reflect(...)]`. /// /// The list of special traits are as follows: +/// * `Debug` /// * `Hash` /// * `PartialEq` -/// * `Serialize` /// /// When registering a trait, there are a few things to keep in mind: /// * Traits must have a valid `Reflect{}` struct in scope. 
For example, `Default` @@ -101,9 +105,9 @@ impl Default for TraitImpl { /// #[derive(Default)] pub(crate) struct ReflectTraits { + debug: TraitImpl, hash: TraitImpl, partial_eq: TraitImpl, - serialize: TraitImpl, idents: Vec, } @@ -123,9 +127,9 @@ impl ReflectTraits { }; match ident.as_str() { + DEBUG_ATTR => traits.debug = TraitImpl::Implemented, PARTIAL_EQ_ATTR => traits.partial_eq = TraitImpl::Implemented, HASH_ATTR => traits.hash = TraitImpl::Implemented, - SERIALIZE_ATTR => traits.serialize = TraitImpl::Implemented, // We only track reflected idents for traits not considered special _ => traits.idents.push(utility::get_reflect_ident(&ident)), } @@ -145,9 +149,9 @@ impl ReflectTraits { // This should be the ident of the custom function let trait_func_ident = TraitImpl::Custom(segment.ident.clone()); match ident.as_str() { + DEBUG_ATTR => traits.debug = trait_func_ident, PARTIAL_EQ_ATTR => traits.partial_eq = trait_func_ident, HASH_ATTR => traits.hash = trait_func_ident, - SERIALIZE_ATTR => traits.serialize = trait_func_ident, _ => {} } } @@ -171,55 +175,70 @@ impl ReflectTraits { &self.idents } - /// Returns the logic for `Reflect::reflect_hash` as a `TokenStream`. + /// Returns the implementation of `Reflect::reflect_hash` as a `TokenStream`. /// /// If `Hash` was not registered, returns `None`. - pub fn get_hash_impl(&self, path: &Path) -> Option { + pub fn get_hash_impl(&self, bevy_reflect_path: &Path) -> Option { match &self.hash { TraitImpl::Implemented => Some(quote! { - use std::hash::{Hash, Hasher}; - let mut hasher = #path::ReflectHasher::default(); - Hash::hash(&std::any::Any::type_id(self), &mut hasher); - Hash::hash(self, &mut hasher); - Some(hasher.finish()) + fn reflect_hash(&self) -> Option { + use std::hash::{Hash, Hasher}; + let mut hasher = #bevy_reflect_path::ReflectHasher::default(); + Hash::hash(&std::any::Any::type_id(self), &mut hasher); + Hash::hash(self, &mut hasher); + Some(hasher.finish()) + } }), TraitImpl::Custom(impl_fn) => Some(quote! { - Some(#impl_fn(self)) + fn reflect_hash(&self) -> Option { + Some(#impl_fn(self)) + } }), TraitImpl::NotImplemented => None, } } - /// Returns the logic for `Reflect::reflect_partial_eq` as a `TokenStream`. + /// Returns the implementation of `Reflect::reflect_partial_eq` as a `TokenStream`. /// /// If `PartialEq` was not registered, returns `None`. - pub fn get_partial_eq_impl(&self) -> Option { + pub fn get_partial_eq_impl( + &self, + bevy_reflect_path: &Path, + ) -> Option { match &self.partial_eq { TraitImpl::Implemented => Some(quote! { - let value = value.any(); - if let Some(value) = value.downcast_ref::() { - Some(std::cmp::PartialEq::eq(self, value)) - } else { - Some(false) + fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { + let value = value.any(); + if let Some(value) = value.downcast_ref::() { + Some(std::cmp::PartialEq::eq(self, value)) + } else { + Some(false) + } } }), TraitImpl::Custom(impl_fn) => Some(quote! { - Some(#impl_fn(self, value)) + fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { + Some(#impl_fn(self, value)) + } }), TraitImpl::NotImplemented => None, } } - /// Returns the logic for `Reflect::serializable` as a `TokenStream`. + /// Returns the implementation of `Reflect::debug` as a `TokenStream`. /// - /// If `Serialize` was not registered, returns `None`. - pub fn get_serialize_impl(&self, path: &Path) -> Option { - match &self.serialize { + /// If `Debug` was not registered, returns `None`. 
+ pub fn get_debug_impl(&self) -> Option { + match &self.debug { TraitImpl::Implemented => Some(quote! { - Some(#path::serde::Serializable::Borrowed(self)) + fn debug(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(self, f) + } }), TraitImpl::Custom(impl_fn) => Some(quote! { - Some(#impl_fn(self)) + fn debug(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + #impl_fn(self, f) + } }), TraitImpl::NotImplemented => None, } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/field_attributes.rs b/crates/bevy_reflect/bevy_reflect_derive/src/field_attributes.rs index a0f3d2b6bc3a0..32b9bc1e8dfbf 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/field_attributes.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/field_attributes.rs @@ -7,15 +7,37 @@ use crate::REFLECT_ATTRIBUTE_NAME; use quote::ToTokens; use syn::spanned::Spanned; -use syn::{Attribute, Meta, NestedMeta}; +use syn::{Attribute, Lit, Meta, NestedMeta}; pub(crate) static IGNORE_ATTR: &str = "ignore"; +pub(crate) static DEFAULT_ATTR: &str = "default"; -/// A container for attributes defined on a field reflected type's field. +/// A container for attributes defined on a reflected type's field. #[derive(Default)] pub(crate) struct ReflectFieldAttr { /// Determines if this field should be ignored. pub ignore: bool, + /// Sets the default behavior of this field. + pub default: DefaultBehavior, +} + +/// Controls how the default value is determined for a field. +pub(crate) enum DefaultBehavior { + /// Field is required. + Required, + /// Field can be defaulted using `Default::default()`. + Default, + /// Field can be created using the given function name. + /// + /// This assumes the function is in scope, is callable with zero arguments, + /// and returns the expected type. + Func(syn::ExprPath), +} + +impl Default for DefaultBehavior { + fn default() -> Self { + Self::Required + } } /// Parse all field attributes marked "reflect" (such as `#[reflect(ignore)]`). 
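Editor's note: the `DefaultBehavior` variants added above back the `#[reflect(default)]` and `#[reflect(default = "...")]` field attributes documented later in this patch. A minimal usage sketch (field and function names are illustrative, mirroring the `from_reflect_should_use_default_field_attributes` test further down):

```rust
use bevy_reflect::{FromReflect, Reflect};

#[derive(Reflect, FromReflect)]
struct MyStruct {
    // Falls back to `Default::default()` when the field is missing from the
    // dynamic value being converted.
    #[reflect(default)]
    foo: String,

    // Falls back to the named zero-argument function instead.
    #[reflect(default = "get_bar_default")]
    bar: usize,
}

fn get_bar_default() -> usize {
    123
}
```

Converting an empty `DynamicStruct` with `MyStruct::from_reflect` then yields `foo: String::default()` and `bar: 123`, which is exactly what the new test asserts.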
@@ -44,16 +66,36 @@ pub(crate) fn parse_field_attrs(attrs: &[Attribute]) -> Result Result<(), syn::Error> { match meta { Meta::Path(path) if path.is_ident(IGNORE_ATTR) => { args.ignore = true; Ok(()) } + Meta::Path(path) if path.is_ident(DEFAULT_ATTR) => { + args.default = DefaultBehavior::Default; + Ok(()) + } Meta::Path(path) => Err(syn::Error::new( path.span(), format!("unknown attribute parameter: {}", path.to_token_stream()), )), + Meta::NameValue(pair) if pair.path.is_ident(DEFAULT_ATTR) => { + let lit = &pair.lit; + match lit { + Lit::Str(lit_str) => { + args.default = DefaultBehavior::Func(lit_str.parse()?); + Ok(()) + } + err => { + Err(syn::Error::new( + err.span(), + format!("expected a string literal containing the name of a function, but found: {}", err.to_token_stream()), + )) + } + } + } Meta::NameValue(pair) => { let path = &pair.path; Err(syn::Error::new( diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs index 8e9e9c679295b..1827424c93d40 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs @@ -1,3 +1,5 @@ +use crate::container_attributes::REFLECT_DEFAULT; +use crate::field_attributes::DefaultBehavior; use crate::ReflectDeriveData; use proc_macro::TokenStream; use proc_macro2::Span; @@ -53,24 +55,28 @@ fn impl_struct_internal(derive_data: &ReflectDeriveData, is_tuple: bool) -> Toke }; let field_types = derive_data.active_types(); - let MemberValuePair(ignored_members, ignored_values) = - get_ignored_fields(derive_data, is_tuple); let MemberValuePair(active_members, active_values) = get_active_fields(derive_data, &ref_struct, &ref_struct_type, is_tuple); - let constructor = if derive_data.traits().contains("ReflectDefault") { + let constructor = if derive_data.traits().contains(REFLECT_DEFAULT) { quote!( let mut __this = Self::default(); #( - __this.#active_members = #active_values; + if let Some(__field) = #active_values() { + // Iff field exists -> use its value + __this.#active_members = __field; + } )* Some(__this) ) } else { + let MemberValuePair(ignored_members, ignored_values) = + get_ignored_fields(derive_data, is_tuple); + quote!( Some( Self { - #(#active_members: #active_values,)* + #(#active_members: #active_values()?,)* #(#ignored_members: #ignored_values,)* } ) @@ -106,14 +112,19 @@ fn impl_struct_internal(derive_data: &ReflectDeriveData, is_tuple: bool) -> Toke } /// Get the collection of ignored field definitions +/// +/// Each value of the `MemberValuePair` is a token stream that generates a +/// a default value for the ignored field. fn get_ignored_fields(derive_data: &ReflectDeriveData, is_tuple: bool) -> MemberValuePair { MemberValuePair::new( derive_data .ignored_fields() .map(|field| { let member = get_ident(field.data, field.index, is_tuple); - let value = quote! { - Default::default() + + let value = match &field.attrs.default { + DefaultBehavior::Func(path) => quote! {#path()}, + _ => quote! {Default::default()}, }; (member, value) @@ -122,7 +133,10 @@ fn get_ignored_fields(derive_data: &ReflectDeriveData, is_tuple: bool) -> Member ) } -/// Get the collection of active field definitions +/// Get the collection of active field definitions. +/// +/// Each value of the `MemberValuePair` is a token stream that generates a +/// closure of type `fn() -> Option` where `T` is that field's type. 
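Editor's note: to make the generated control flow in `get_active_fields` below easier to follow, this is roughly what the value token stream expands to for a field `foo: String` marked `#[reflect(default)]`. This is a hand-expanded sketch, not literal macro output; the real code is emitted as an uncalled closure that the constructor invokes later.

```rust
use bevy_reflect::{DynamicStruct, FromReflect, Struct};

// Sketch of the per-field closure generated by the derive, immediately invoked
// here for illustration. `ref_struct` is the dynamic value being converted.
fn foo_from(ref_struct: &DynamicStruct) -> Option<String> {
    (|| {
        if let Some(field) = Struct::field(ref_struct, "foo") {
            // Field present on the dynamic struct: convert it as usual.
            <String as FromReflect>::from_reflect(field)
        } else {
            // Field missing: fall back to the default behavior.
            Some(Default::default())
        }
    })()
}
```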
fn get_active_fields( derive_data: &ReflectDeriveData, dyn_struct_name: &Ident, @@ -139,12 +153,33 @@ fn get_active_fields( let accessor = get_field_accessor(field.data, field.index, is_tuple); let ty = field.data.ty.clone(); - let value = quote! { { - <#ty as #bevy_reflect_path::FromReflect>::from_reflect( - // Accesses the field on the given dynamic struct or tuple struct - #bevy_reflect_path::#struct_type::field(#dyn_struct_name, #accessor)? - )? - }}; + let get_field = quote! { + #bevy_reflect_path::#struct_type::field(#dyn_struct_name, #accessor) + }; + + let value = match &field.attrs.default { + DefaultBehavior::Func(path) => quote! { + (|| + if let Some(field) = #get_field { + <#ty as #bevy_reflect_path::FromReflect>::from_reflect(field) + } else { + Some(#path()) + } + ) + }, + DefaultBehavior::Default => quote! { + (|| + if let Some(field) = #get_field { + <#ty as #bevy_reflect_path::FromReflect>::from_reflect(field) + } else { + Some(Default::default()) + } + ) + }, + DefaultBehavior::Required => quote! { + (|| <#ty as #bevy_reflect_path::FromReflect>::from_reflect(#get_field?)) + }, + }; (member, value) }) diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls.rs index 18b4c0ab96e4b..7aa4ea373df72 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls.rs @@ -32,25 +32,38 @@ pub(crate) fn impl_struct(derive_data: &ReflectDeriveData) -> TokenStream { .unwrap_or_else(|| Member::Unnamed(Index::from(field.index))) }) .collect::>(); + let field_types = derive_data + .active_fields() + .map(|field| field.data.ty.clone()) + .collect::>(); let field_count = field_idents.len(); let field_indices = (0..field_count).collect::>(); - let hash_fn = derive_data - .traits() - .get_hash_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); - let serialize_fn = derive_data - .traits() - .get_serialize_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); + let hash_fn = derive_data.traits().get_hash_impl(bevy_reflect_path); let partial_eq_fn = derive_data .traits() - .get_partial_eq_impl() + .get_partial_eq_impl(bevy_reflect_path) .unwrap_or_else(|| { quote! { - #bevy_reflect_path::struct_partial_eq(self, value) + fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { + #bevy_reflect_path::struct_partial_eq(self, value) + } } }); + let debug_fn = derive_data.traits().get_debug_impl(); + + let typed_impl = impl_typed( + struct_name, + derive_data.generics(), + quote! { + let fields: [#bevy_reflect_path::NamedField; #field_count] = [ + #(#bevy_reflect_path::NamedField::new::<#field_types, _>(#field_names),)* + ]; + let info = #bevy_reflect_path::StructInfo::new::(&fields); + #bevy_reflect_path::TypeInfo::Struct(info) + }, + bevy_reflect_path, + ); let get_type_registration_impl = derive_data.get_type_registration(); let (impl_generics, ty_generics, where_clause) = derive_data.generics().split_for_impl(); @@ -58,6 +71,8 @@ pub(crate) fn impl_struct(derive_data: &ReflectDeriveData) -> TokenStream { TokenStream::from(quote! 
{ #get_type_registration_impl + #typed_impl + impl #impl_generics #bevy_reflect_path::Struct for #struct_name #ty_generics #where_clause { fn field(&self, name: &str) -> Option<&dyn #bevy_reflect_path::Reflect> { match name { @@ -117,6 +132,11 @@ pub(crate) fn impl_struct(derive_data: &ReflectDeriveData) -> TokenStream { std::any::type_name::() } + #[inline] + fn get_type_info(&self) -> &'static #bevy_reflect_path::TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn std::any::Any { self @@ -166,17 +186,11 @@ pub(crate) fn impl_struct(derive_data: &ReflectDeriveData) -> TokenStream { #bevy_reflect_path::ReflectMut::Struct(self) } - fn serializable(&self) -> Option<#bevy_reflect_path::serde::Serializable> { - #serialize_fn - } + #hash_fn - fn reflect_hash(&self) -> Option { - #hash_fn - } + #partial_eq_fn - fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { - #partial_eq_fn - } + #debug_fn } }) } @@ -191,30 +205,45 @@ pub(crate) fn impl_tuple_struct(derive_data: &ReflectDeriveData) -> TokenStream .active_fields() .map(|field| Member::Unnamed(Index::from(field.index))) .collect::>(); + let field_types = derive_data + .active_fields() + .map(|field| field.data.ty.clone()) + .collect::>(); let field_count = field_idents.len(); let field_indices = (0..field_count).collect::>(); - let hash_fn = derive_data - .traits() - .get_hash_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); - let serialize_fn = derive_data - .traits() - .get_serialize_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); + let hash_fn = derive_data.traits().get_hash_impl(bevy_reflect_path); let partial_eq_fn = derive_data .traits() - .get_partial_eq_impl() + .get_partial_eq_impl(bevy_reflect_path) .unwrap_or_else(|| { quote! { - #bevy_reflect_path::tuple_struct_partial_eq(self, value) + fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { + #bevy_reflect_path::tuple_struct_partial_eq(self, value) + } } }); + let debug_fn = derive_data.traits().get_debug_impl(); + + let typed_impl = impl_typed( + struct_name, + derive_data.generics(), + quote! { + let fields: [#bevy_reflect_path::UnnamedField; #field_count] = [ + #(#bevy_reflect_path::UnnamedField::new::<#field_types>(#field_indices),)* + ]; + let info = #bevy_reflect_path::TupleStructInfo::new::(&fields); + #bevy_reflect_path::TypeInfo::TupleStruct(info) + }, + bevy_reflect_path, + ); let (impl_generics, ty_generics, where_clause) = derive_data.generics().split_for_impl(); TokenStream::from(quote! 
{ #get_type_registration_impl + #typed_impl + impl #impl_generics #bevy_reflect_path::TupleStruct for #struct_name #ty_generics #where_clause { fn field(&self, index: usize) -> Option<&dyn #bevy_reflect_path::Reflect> { match index { @@ -253,6 +282,11 @@ pub(crate) fn impl_tuple_struct(derive_data: &ReflectDeriveData) -> TokenStream std::any::type_name::() } + #[inline] + fn get_type_info(&self) -> &'static #bevy_reflect_path::TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn std::any::Any { self @@ -301,17 +335,11 @@ pub(crate) fn impl_tuple_struct(derive_data: &ReflectDeriveData) -> TokenStream #bevy_reflect_path::ReflectMut::TupleStruct(self) } - fn serializable(&self) -> Option<#bevy_reflect_path::serde::Serializable> { - #serialize_fn - } + #hash_fn - fn reflect_hash(&self) -> Option { - #hash_fn - } + #partial_eq_fn - fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { - #partial_eq_fn - } + #debug_fn } }) } @@ -322,22 +350,28 @@ pub(crate) fn impl_value( generics: &Generics, get_type_registration_impl: proc_macro2::TokenStream, bevy_reflect_path: &Path, - reflect_attrs: &ReflectTraits, + reflect_traits: &ReflectTraits, ) -> TokenStream { - let hash_fn = reflect_attrs - .get_hash_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); - let partial_eq_fn = reflect_attrs - .get_partial_eq_impl() - .unwrap_or_else(|| quote!(None)); - let serialize_fn = reflect_attrs - .get_serialize_impl(bevy_reflect_path) - .unwrap_or_else(|| quote!(None)); + let hash_fn = reflect_traits.get_hash_impl(bevy_reflect_path); + let partial_eq_fn = reflect_traits.get_partial_eq_impl(bevy_reflect_path); + let debug_fn = reflect_traits.get_debug_impl(); + + let typed_impl = impl_typed( + type_name, + generics, + quote! { + let info = #bevy_reflect_path::ValueInfo::new::(); + #bevy_reflect_path::TypeInfo::Value(info) + }, + bevy_reflect_path, + ); let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); TokenStream::from(quote! { #get_type_registration_impl + #typed_impl + // SAFE: any and any_mut both return self unsafe impl #impl_generics #bevy_reflect_path::Reflect for #type_name #ty_generics #where_clause { #[inline] @@ -345,6 +379,11 @@ pub(crate) fn impl_value( std::any::type_name::() } + #[inline] + fn get_type_info(&self) -> &'static #bevy_reflect_path::TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn std::any::Any { self @@ -394,17 +433,46 @@ pub(crate) fn impl_value( #bevy_reflect_path::ReflectMut::Value(self) } - fn reflect_hash(&self) -> Option { - #hash_fn - } + #hash_fn - fn reflect_partial_eq(&self, value: &dyn #bevy_reflect_path::Reflect) -> Option { - #partial_eq_fn - } + #partial_eq_fn - fn serializable(&self) -> Option<#bevy_reflect_path::serde::Serializable> { - #serialize_fn - } + #debug_fn } }) } + +fn impl_typed( + type_name: &Ident, + generics: &Generics, + generator: proc_macro2::TokenStream, + bevy_reflect_path: &Path, +) -> proc_macro2::TokenStream { + let is_generic = !generics.params.is_empty(); + + let static_generator = if is_generic { + quote! { + static CELL: #bevy_reflect_path::utility::GenericTypeInfoCell = #bevy_reflect_path::utility::GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| { + #generator + }) + } + } else { + quote! 
{ + static CELL: #bevy_reflect_path::utility::NonGenericTypeInfoCell = #bevy_reflect_path::utility::NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| { + #generator + }) + } + }; + + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + quote! { + impl #impl_generics #bevy_reflect_path::Typed for #type_name #ty_generics #where_clause { + fn type_info() -> &'static #bevy_reflect_path::TypeInfo { + #static_generator + } + } + } +} diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs index 8d59def2c0f9b..7cf9fe560b004 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs @@ -61,6 +61,8 @@ pub fn derive_reflect(input: TokenStream) -> TokenStream { /// /// This macro supports the following field attributes: /// * `#[reflect(ignore)]`: Ignores the field. This requires the field to implement [`Default`]. +/// * `#[reflect(default)]`: If the field's value cannot be read, uses its [`Default`] implementation. +/// * `#[reflect(default = "some_func")]`: If the field's value cannot be read, uses the function with the given name. /// #[proc_macro_derive(FromReflect, attributes(reflect))] pub fn derive_from_reflect(input: TokenStream) -> TokenStream { diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/trait_reflection.rs b/crates/bevy_reflect/bevy_reflect_derive/src/trait_reflection.rs index fe4d4a6bb257e..7d16bfb01c85a 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/trait_reflection.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/trait_reflection.rs @@ -21,6 +21,10 @@ impl Parse for TraitInfo { } } +/// A trait attribute macro that allows a reflected type to be downcast to a trait object. +/// +/// This generates a struct that takes the form `ReflectMyTrait`. An instance of this struct can then be +/// used to perform the conversion. pub(crate) fn reflect_trait(_args: &TokenStream, input: TokenStream) -> TokenStream { let trait_info = parse_macro_input!(input as TraitInfo); let item_trait = &trait_info.item_trait; @@ -28,23 +32,51 @@ pub(crate) fn reflect_trait(_args: &TokenStream, input: TokenStream) -> TokenStr let trait_vis = &item_trait.vis; let reflect_trait_ident = crate::utility::get_reflect_ident(&item_trait.ident.to_string()); let bevy_reflect_path = BevyManifest::default().get_path("bevy_reflect"); + + let struct_doc = format!( + " A type generated by the #[reflect_trait] macro for the `{}` trait.\n\n This allows casting from `dyn Reflect` to `dyn {}`.", + trait_ident, + trait_ident + ); + let get_doc = format!( + " Downcast a `&dyn Reflect` type to `&dyn {}`.\n\n If the type cannot be downcast, `None` is returned.", + trait_ident, + ); + let get_mut_doc = format!( + " Downcast a `&mut dyn Reflect` type to `&mut dyn {}`.\n\n If the type cannot be downcast, `None` is returned.", + trait_ident, + ); + let get_box_doc = format!( + " Downcast a `Box` type to `Box`.\n\n If the type cannot be downcast, this will return `Err(Box)`.", + trait_ident, + ); + TokenStream::from(quote! 
{ #item_trait + #[doc = #struct_doc] #[derive(Clone)] #trait_vis struct #reflect_trait_ident { get_func: fn(&dyn #bevy_reflect_path::Reflect) -> Option<&dyn #trait_ident>, get_mut_func: fn(&mut dyn #bevy_reflect_path::Reflect) -> Option<&mut dyn #trait_ident>, + get_boxed_func: fn(Box) -> Result, Box>, } impl #reflect_trait_ident { + #[doc = #get_doc] pub fn get<'a>(&self, reflect_value: &'a dyn #bevy_reflect_path::Reflect) -> Option<&'a dyn #trait_ident> { (self.get_func)(reflect_value) } + #[doc = #get_mut_doc] pub fn get_mut<'a>(&self, reflect_value: &'a mut dyn #bevy_reflect_path::Reflect) -> Option<&'a mut dyn #trait_ident> { (self.get_mut_func)(reflect_value) } + + #[doc = #get_box_doc] + pub fn get_boxed(&self, reflect_value: Box) -> Result, Box> { + (self.get_boxed_func)(reflect_value) + } } impl #bevy_reflect_path::FromType for #reflect_trait_ident { @@ -55,6 +87,9 @@ pub(crate) fn reflect_trait(_args: &TokenStream, input: TokenStream) -> TokenStr }, get_mut_func: |reflect_value| { reflect_value.downcast_mut::().map(|value| value as &mut dyn #trait_ident) + }, + get_boxed_func: |reflect_value| { + reflect_value.downcast::().map(|value| value as Box) } } } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/type_uuid.rs b/crates/bevy_reflect/bevy_reflect_derive/src/type_uuid.rs index 8adb2dbcdad7e..a8017dc30e51f 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/type_uuid.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/type_uuid.rs @@ -17,7 +17,7 @@ pub(crate) fn type_uuid_derive(input: proc_macro::TokenStream) -> proc_macro::To ast.generics.type_params_mut().for_each(|param| { param .bounds - .push(syn::parse_quote!(#bevy_reflect_path::TypeUuid)) + .push(syn::parse_quote!(#bevy_reflect_path::TypeUuid)); }); let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); diff --git a/crates/bevy_reflect/src/array.rs b/crates/bevy_reflect/src/array.rs index 0bbcf271272af..bc8c08a108e80 100644 --- a/crates/bevy_reflect/src/array.rs +++ b/crates/bevy_reflect/src/array.rs @@ -1,7 +1,9 @@ -use crate::{serde::Serializable, Reflect, ReflectMut, ReflectRef}; -use serde::ser::SerializeSeq; +use crate::{ + utility::NonGenericTypeInfoCell, DynamicInfo, Reflect, ReflectMut, ReflectRef, TypeInfo, Typed, +}; use std::{ - any::Any, + any::{Any, TypeId}, + fmt::Debug, hash::{Hash, Hasher}, }; @@ -37,6 +39,73 @@ pub trait Array: Reflect { } } +/// A container for compile-time array info. +#[derive(Clone, Debug)] +pub struct ArrayInfo { + type_name: &'static str, + type_id: TypeId, + item_type_name: &'static str, + item_type_id: TypeId, + capacity: usize, +} + +impl ArrayInfo { + /// Create a new [`ArrayInfo`]. + /// + /// # Arguments + /// + /// * `capacity`: The maximum capacity of the underlying array. + /// + pub fn new(capacity: usize) -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + item_type_name: std::any::type_name::(), + item_type_id: TypeId::of::(), + capacity, + } + } + + /// The compile-time capacity of the array. + pub fn capacity(&self) -> usize { + self.capacity + } + + /// The [type name] of the array. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the array. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the array type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } + + /// The [type name] of the array item. 
+ /// + /// [type name]: std::any::type_name + pub fn item_type_name(&self) -> &'static str { + self.item_type_name + } + + /// The [`TypeId`] of the array item. + pub fn item_type_id(&self) -> TypeId { + self.item_type_id + } + + /// Check if the given type matches the array item type. + pub fn item_is(&self) -> bool { + TypeId::of::() == self.item_type_id + } +} + /// A fixed-size list of reflected values. /// /// This differs from [`DynamicList`] in that the size of the [`DynamicArray`] @@ -89,6 +158,11 @@ unsafe impl Reflect for DynamicArray { self.name.as_str() } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -142,10 +216,6 @@ unsafe impl Reflect for DynamicArray { fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { array_partial_eq(self, value) } - - fn serializable(&self) -> Option { - Some(Serializable::Borrowed(self)) - } } impl Array for DynamicArray { @@ -185,6 +255,13 @@ impl Array for DynamicArray { } } +impl Typed for DynamicArray { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) + } +} + /// An iterator over an [`Array`]. pub struct ArrayIter<'a> { pub(crate) array: &'a dyn Array, @@ -210,43 +287,6 @@ impl<'a> Iterator for ArrayIter<'a> { impl<'a> ExactSizeIterator for ArrayIter<'a> {} -impl serde::Serialize for dyn Array { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - array_serialize(self, serializer) - } -} - -impl serde::Serialize for DynamicArray { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - array_serialize(self, serializer) - } -} - -/// Serializes the given [array](Array). -#[inline] -pub fn array_serialize(array: &A, serializer: S) -> Result -where - S: serde::Serializer, -{ - let mut seq = serializer.serialize_seq(Some(array.len()))?; - for element in array.iter() { - let serializable = element.serializable().ok_or_else(|| { - serde::ser::Error::custom(format!( - "Type '{}' does not support `Reflect` serialization", - element.type_name() - )) - })?; - seq.serialize_element(serializable.borrow())?; - } - seq.end() -} - /// Returns the `u64` hash of the given [array](Array). #[inline] pub fn array_hash(array: &A) -> Option { @@ -254,7 +294,7 @@ pub fn array_hash(array: &A) -> Option { std::any::Any::type_id(array).hash(&mut hasher); array.len().hash(&mut hasher); for value in array.iter() { - hasher.write_u64(value.reflect_hash()?) + hasher.write_u64(value.reflect_hash()?); } Some(hasher.finish()) } @@ -298,3 +338,29 @@ pub fn array_partial_eq(array: &A, reflect: &dyn Reflect) -> Option) -> std::fmt::Result { + let mut debug = f.debug_list(); + for item in dyn_array.iter() { + debug.entry(&item as &dyn Debug); + } + debug.finish() +} diff --git a/crates/bevy_reflect/src/fields.rs b/crates/bevy_reflect/src/fields.rs new file mode 100644 index 0000000000000..21dc9ec5f75e2 --- /dev/null +++ b/crates/bevy_reflect/src/fields.rs @@ -0,0 +1,84 @@ +use crate::Reflect; +use std::any::{Any, TypeId}; +use std::borrow::Cow; + +/// The named field of a reflected struct. +#[derive(Clone, Debug)] +pub struct NamedField { + name: Cow<'static, str>, + type_name: &'static str, + type_id: TypeId, +} + +impl NamedField { + /// Create a new [`NamedField`]. 
+ pub fn new>>(name: TName) -> Self { + Self { + name: name.into(), + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + } + } + + /// The name of the field. + pub fn name(&self) -> &Cow<'static, str> { + &self.name + } + + /// The [type name] of the field. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the field. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the field type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} + +/// The unnamed field of a reflected tuple or tuple struct. +#[derive(Clone, Debug)] +pub struct UnnamedField { + index: usize, + type_name: &'static str, + type_id: TypeId, +} + +impl UnnamedField { + pub fn new(index: usize) -> Self { + Self { + index, + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + } + } + + /// Returns the index of the field. + pub fn index(&self) -> usize { + self.index + } + + /// The [type name] of the field. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the field. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the field type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} diff --git a/crates/bevy_reflect/src/impls/glam.rs b/crates/bevy_reflect/src/impls/glam.rs index 93f803cc0cf94..2310e640eded1 100644 --- a/crates/bevy_reflect/src/impls/glam.rs +++ b/crates/bevy_reflect/src/impls/glam.rs @@ -1,19 +1,19 @@ use crate as bevy_reflect; use crate::prelude::ReflectDefault; use crate::reflect::Reflect; -use crate::ReflectDeserialize; +use crate::{ReflectDeserialize, ReflectSerialize}; use bevy_reflect_derive::{impl_from_reflect_value, impl_reflect_struct, impl_reflect_value}; use glam::*; impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct IVec2 { x: i32, y: i32, } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct IVec3 { x: i32, y: i32, @@ -21,7 +21,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct IVec4 { x: i32, y: i32, @@ -31,14 +31,14 @@ impl_reflect_struct!( ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct UVec2 { x: u32, y: u32, } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct UVec3 { x: u32, y: u32, @@ -46,7 +46,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct UVec4 { x: u32, y: u32, @@ -56,14 +56,14 @@ impl_reflect_struct!( ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Vec2 { x: f32, y: f32, } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Vec3 { x: f32, y: f32, @@ -71,7 +71,7 @@ impl_reflect_struct!( } ); 
impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Vec3A { x: f32, y: f32, @@ -79,7 +79,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Vec4 { x: f32, y: f32, @@ -89,14 +89,14 @@ impl_reflect_struct!( ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct DVec2 { x: f64, y: f64, } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct DVec3 { x: f64, y: f64, @@ -104,7 +104,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct DVec4 { x: f64, y: f64, @@ -114,7 +114,7 @@ impl_reflect_struct!( ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Mat3 { x_axis: Vec3, y_axis: Vec3, @@ -122,7 +122,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct Mat4 { x_axis: Vec4, y_axis: Vec4, @@ -132,7 +132,7 @@ impl_reflect_struct!( ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct DMat3 { x_axis: DVec3, y_axis: DVec3, @@ -140,7 +140,7 @@ impl_reflect_struct!( } ); impl_reflect_struct!( - #[reflect(PartialEq, Serialize, Deserialize, Default)] + #[reflect(Debug, PartialEq, Serialize, Deserialize, Default)] struct DMat4 { x_axis: DVec4, y_axis: DVec4, @@ -153,8 +153,8 @@ impl_reflect_struct!( // mechanisms for read-only fields. I doubt those mechanisms would be added, // so for now quaternions will remain as values. They are represented identically // to Vec4 and DVec4, so you may use those instead and convert between. 
-impl_reflect_value!(Quat(PartialEq, Serialize, Deserialize, Default)); -impl_reflect_value!(DQuat(PartialEq, Serialize, Deserialize, Default)); +impl_reflect_value!(Quat(Debug, PartialEq, Serialize, Deserialize, Default)); +impl_reflect_value!(DQuat(Debug, PartialEq, Serialize, Deserialize, Default)); impl_from_reflect_value!(Quat); impl_from_reflect_value!(DQuat); diff --git a/crates/bevy_reflect/src/impls/smallvec.rs b/crates/bevy_reflect/src/impls/smallvec.rs index 6bd32dc0b25c9..2ab1f97869c1e 100644 --- a/crates/bevy_reflect/src/impls/smallvec.rs +++ b/crates/bevy_reflect/src/impls/smallvec.rs @@ -1,8 +1,9 @@ use smallvec::SmallVec; use std::any::Any; +use crate::utility::GenericTypeInfoCell; use crate::{ - serde::Serializable, Array, ArrayIter, FromReflect, List, Reflect, ReflectMut, ReflectRef, + Array, ArrayIter, FromReflect, List, ListInfo, Reflect, ReflectMut, ReflectRef, TypeInfo, Typed, }; impl Array for SmallVec @@ -63,6 +64,10 @@ where std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -100,16 +105,18 @@ where Box::new(List::clone_dynamic(self)) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { crate::list_partial_eq(self, value) } +} - fn serializable(&self) -> Option { - None +impl Typed for SmallVec +where + T::Item: FromReflect + Clone, +{ + fn type_info() -> &'static TypeInfo { + static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| TypeInfo::List(ListInfo::new::())) } } diff --git a/crates/bevy_reflect/src/impls/std.rs b/crates/bevy_reflect/src/impls/std.rs index f678627c4a410..13dae6c634580 100644 --- a/crates/bevy_reflect/src/impls/std.rs +++ b/crates/bevy_reflect/src/impls/std.rs @@ -1,10 +1,11 @@ use crate as bevy_reflect; use crate::{ - map_partial_eq, serde::Serializable, Array, ArrayIter, DynamicMap, FromReflect, FromType, - GetTypeRegistration, List, Map, MapIter, Reflect, ReflectDeserialize, ReflectMut, ReflectRef, - TypeRegistration, + map_partial_eq, Array, ArrayInfo, ArrayIter, DynamicMap, FromReflect, FromType, + GetTypeRegistration, List, ListInfo, Map, MapInfo, MapIter, Reflect, ReflectDeserialize, + ReflectMut, ReflectRef, ReflectSerialize, TypeInfo, TypeRegistration, Typed, ValueInfo, }; +use crate::utility::{GenericTypeInfoCell, NonGenericTypeInfoCell}; use bevy_reflect_derive::{impl_from_reflect_value, impl_reflect_value}; use bevy_utils::{Duration, HashMap, HashSet}; use serde::{Deserialize, Serialize}; @@ -15,28 +16,30 @@ use std::{ ops::Range, }; -impl_reflect_value!(bool(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(u8(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(u16(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(u32(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(u64(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(u128(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(usize(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(i8(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(i16(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(i32(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(i64(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(i128(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(isize(Hash, PartialEq, Serialize, Deserialize)); -impl_reflect_value!(f32(PartialEq, Serialize, 
Deserialize)); -impl_reflect_value!(f64(PartialEq, Serialize, Deserialize)); -impl_reflect_value!(String(Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(bool(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(char(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(u8(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(u16(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(u32(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(u64(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(u128(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(usize(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(i8(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(i16(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(i32(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(i64(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(i128(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(isize(Debug, Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(f32(Debug, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(f64(Debug, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(String(Debug, Hash, PartialEq, Serialize, Deserialize)); impl_reflect_value!(Option Deserialize<'de> + Reflect + 'static>(Serialize, Deserialize)); impl_reflect_value!(HashSet Deserialize<'de> + Send + Sync + 'static>(Serialize, Deserialize)); impl_reflect_value!(Range Deserialize<'de> + Send + Sync + 'static>(Serialize, Deserialize)); -impl_reflect_value!(Duration(Hash, PartialEq, Serialize, Deserialize)); +impl_reflect_value!(Duration(Debug, Hash, PartialEq, Serialize, Deserialize)); impl_from_reflect_value!(bool); +impl_from_reflect_value!(char); impl_from_reflect_value!(u8); impl_from_reflect_value!(u16); impl_from_reflect_value!(u32); @@ -108,6 +111,10 @@ unsafe impl Reflect for Vec { std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -152,9 +159,12 @@ unsafe impl Reflect for Vec { fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { crate::list_partial_eq(self, value) } +} - fn serializable(&self) -> Option { - Some(Serializable::Owned(Box::new(SerializeArrayLike(self)))) +impl Typed for Vec { + fn type_info() -> &'static TypeInfo { + static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| TypeInfo::List(ListInfo::new::())) } } @@ -226,6 +236,10 @@ unsafe impl Reflect for HashMap { std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -246,7 +260,7 @@ unsafe impl Reflect for HashMap { if let ReflectRef::Map(map_value) = value.reflect_ref() { for (key, value) in map_value.iter() { if let Some(v) = Map::get_mut(self, key) { - v.apply(value) + v.apply(value); } } } else { @@ -271,16 +285,15 @@ unsafe impl Reflect for HashMap { Box::new(self.clone_dynamic()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { map_partial_eq(self, value) } +} - fn serializable(&self) -> Option { - None +impl Typed for HashMap { + fn type_info() -> &'static TypeInfo { + static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| TypeInfo::Map(MapInfo::new::())) } } @@ -344,6 
+357,10 @@ unsafe impl Reflect for [T; N] { std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -399,11 +416,6 @@ unsafe impl Reflect for [T; N] { fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { crate::array_partial_eq(self, value) } - - #[inline] - fn serializable(&self) -> Option { - Some(Serializable::Owned(Box::new(SerializeArrayLike(self)))) - } } impl FromReflect for [T; N] { @@ -420,15 +432,10 @@ impl FromReflect for [T; N] { } } -// Supports dynamic serialization for types that implement `Array`. -struct SerializeArrayLike<'a>(&'a dyn Array); - -impl<'a> serde::Serialize for SerializeArrayLike<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - crate::array_serialize(self.0, serializer) +impl Typed for [T; N] { + fn type_info() -> &'static TypeInfo { + static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| TypeInfo::Array(ArrayInfo::new::(N))) } } @@ -464,6 +471,10 @@ unsafe impl Reflect for Cow<'static, str> { std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -521,9 +532,12 @@ unsafe impl Reflect for Cow<'static, str> { Some(false) } } +} - fn serializable(&self) -> Option { - Some(Serializable::Borrowed(self)) +impl Typed for Cow<'static, str> { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Value(ValueInfo::new::())) } } @@ -531,6 +545,7 @@ impl GetTypeRegistration for Cow<'static, str> { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::>(); registration.insert::(FromType::>::from_type()); + registration.insert::(FromType::>::from_type()); registration } } @@ -543,13 +558,28 @@ impl FromReflect for Cow<'static, str> { #[cfg(test)] mod tests { - use crate::Reflect; + use crate::{Reflect, ReflectSerialize, TypeRegistry}; use bevy_utils::HashMap; use std::f32::consts::{PI, TAU}; #[test] fn can_serialize_duration() { - assert!(std::time::Duration::ZERO.serializable().is_some()); + let mut type_registry = TypeRegistry::default(); + type_registry.register::(); + + let reflect_serialize = type_registry + .get_type_data::(std::any::TypeId::of::()) + .unwrap(); + let _serializable = reflect_serialize.get_serializable(&std::time::Duration::ZERO); + } + + #[test] + fn should_partial_eq_char() { + let a: &dyn Reflect = &'x'; + let b: &dyn Reflect = &'x'; + let c: &dyn Reflect = &'o'; + assert!(a.reflect_partial_eq(b).unwrap_or_default()); + assert!(!a.reflect_partial_eq(c).unwrap_or_default()); } #[test] diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index b484e47738cfd..22833c557b871 100644 --- a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -1,6 +1,7 @@ #![doc = include_str!("../README.md")] mod array; +mod fields; mod list; mod map; mod path; @@ -8,6 +9,7 @@ mod reflect; mod struct_trait; mod tuple; mod tuple_struct; +mod type_info; mod type_registry; mod type_uuid; mod impls { @@ -26,17 +28,19 @@ mod impls { pub mod serde; pub mod std_traits; +pub mod utility; pub mod prelude { pub use crate::std_traits::*; #[doc(hidden)] pub use crate::{ - reflect_trait, GetField, GetTupleStructField, Reflect, ReflectDeserialize, Struct, - TupleStruct, + reflect_trait, GetField, GetTupleStructField, Reflect, ReflectDeserialize, + ReflectSerialize, 
Struct, TupleStruct, }; } pub use array::*; +pub use fields::*; pub use impls::*; pub use list::*; pub use map::*; @@ -45,6 +49,7 @@ pub use reflect::*; pub use struct_trait::*; pub use tuple::*; pub use tuple_struct::*; +pub use type_info::*; pub use type_registry::*; pub use type_uuid::*; @@ -92,7 +97,9 @@ mod tests { ser::{to_string_pretty, PrettyConfig}, Deserializer, }; + use std::fmt::{Debug, Formatter}; + use super::prelude::*; use super::*; use crate as bevy_reflect; use crate::serde::{ReflectDeserializer, ReflectSerializer}; @@ -231,6 +238,66 @@ mod tests { assert_eq!(values, vec![1]); } + #[test] + fn from_reflect_should_use_default_field_attributes() { + #[derive(Reflect, FromReflect, Eq, PartialEq, Debug)] + struct MyStruct { + // Use `Default::default()` + // Note that this isn't an ignored field + #[reflect(default)] + foo: String, + + // Use `get_bar_default()` + #[reflect(default = "get_bar_default")] + #[reflect(ignore)] + bar: usize, + } + + fn get_bar_default() -> usize { + 123 + } + + let expected = MyStruct { + foo: String::default(), + bar: 123, + }; + + let dyn_struct = DynamicStruct::default(); + let my_struct = ::from_reflect(&dyn_struct); + + assert_eq!(Some(expected), my_struct); + } + + #[test] + fn from_reflect_should_use_default_container_attribute() { + #[derive(Reflect, FromReflect, Eq, PartialEq, Debug)] + #[reflect(Default)] + struct MyStruct { + foo: String, + #[reflect(ignore)] + bar: usize, + } + + impl Default for MyStruct { + fn default() -> Self { + Self { + foo: String::from("Hello"), + bar: 123, + } + } + } + + let expected = MyStruct { + foo: String::from("Hello"), + bar: 123, + }; + + let dyn_struct = DynamicStruct::default(); + let my_struct = ::from_reflect(&dyn_struct); + + assert_eq!(Some(expected), my_struct); + } + #[test] fn reflect_complex_patch() { #[derive(Reflect, Eq, PartialEq, Debug, FromReflect)] @@ -474,6 +541,237 @@ mod tests { ); } + #[test] + fn reflect_type_info() { + // TypeInfo + let info = i32::type_info(); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!(std::any::TypeId::of::(), info.type_id()); + + // TypeInfo (unsized) + assert_eq!( + std::any::TypeId::of::(), + ::type_info().type_id() + ); + + // TypeInfo (instance) + let value: &dyn Reflect = &123_i32; + let info = value.get_type_info(); + assert!(info.is::()); + + // Struct + #[derive(Reflect)] + struct MyStruct { + foo: i32, + bar: usize, + } + + let info = MyStruct::type_info(); + if let TypeInfo::Struct(info) = info { + assert!(info.is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!( + std::any::type_name::(), + info.field("foo").unwrap().type_name() + ); + assert_eq!( + std::any::TypeId::of::(), + info.field("foo").unwrap().type_id() + ); + assert!(info.field("foo").unwrap().is::()); + assert_eq!("foo", info.field("foo").unwrap().name()); + assert_eq!( + std::any::type_name::(), + info.field_at(1).unwrap().type_name() + ); + } else { + panic!("Expected `TypeInfo::Struct`"); + } + + let value: &dyn Reflect = &MyStruct { foo: 123, bar: 321 }; + let info = value.get_type_info(); + assert!(info.is::()); + + // Struct (generic) + #[derive(Reflect)] + struct MyGenericStruct { + foo: T, + bar: usize, + } + + let info = >::type_info(); + if let TypeInfo::Struct(info) = info { + assert!(info.is::>()); + assert_eq!( + std::any::type_name::>(), + info.type_name() + ); + assert_eq!( + std::any::type_name::(), + info.field("foo").unwrap().type_name() + ); + assert_eq!("foo", info.field("foo").unwrap().name()); + assert_eq!( + 
std::any::type_name::(), + info.field_at(1).unwrap().type_name() + ); + } else { + panic!("Expected `TypeInfo::Struct`"); + } + + let value: &dyn Reflect = &MyGenericStruct { + foo: String::from("Hello!"), + bar: 321, + }; + let info = value.get_type_info(); + assert!(info.is::>()); + + // Tuple Struct + #[derive(Reflect)] + struct MyTupleStruct(usize, i32, MyStruct); + + let info = MyTupleStruct::type_info(); + if let TypeInfo::TupleStruct(info) = info { + assert!(info.is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!( + std::any::type_name::(), + info.field_at(1).unwrap().type_name() + ); + assert!(info.field_at(1).unwrap().is::()); + } else { + panic!("Expected `TypeInfo::TupleStruct`"); + } + + let value: &dyn Reflect = &MyTupleStruct(123, 321, MyStruct { foo: 123, bar: 321 }); + let info = value.get_type_info(); + assert!(info.is::()); + + // Tuple + type MyTuple = (u32, f32, String); + + let info = MyTuple::type_info(); + if let TypeInfo::Tuple(info) = info { + assert!(info.is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!( + std::any::type_name::(), + info.field_at(1).unwrap().type_name() + ); + } else { + panic!("Expected `TypeInfo::Tuple`"); + } + + let value: &dyn Reflect = &(123_u32, 1.23_f32, String::from("Hello!")); + let info = value.get_type_info(); + assert!(info.is::()); + + // List + type MyList = Vec; + + let info = MyList::type_info(); + if let TypeInfo::List(info) = info { + assert!(info.is::()); + assert!(info.item_is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!(std::any::type_name::(), info.item_type_name()); + } else { + panic!("Expected `TypeInfo::List`"); + } + + let value: &dyn Reflect = &vec![123_usize]; + let info = value.get_type_info(); + assert!(info.is::()); + + // List (SmallVec) + #[cfg(feature = "smallvec")] + { + type MySmallVec = smallvec::SmallVec<[String; 2]>; + + let info = MySmallVec::type_info(); + if let TypeInfo::List(info) = info { + assert!(info.is::()); + assert!(info.item_is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!(std::any::type_name::(), info.item_type_name()); + } else { + panic!("Expected `TypeInfo::List`"); + } + + let value: MySmallVec = smallvec::smallvec![String::default(); 2]; + let value: &dyn Reflect = &value; + let info = value.get_type_info(); + assert!(info.is::()); + } + + // Array + type MyArray = [usize; 3]; + + let info = MyArray::type_info(); + if let TypeInfo::Array(info) = info { + assert!(info.is::()); + assert!(info.item_is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!(std::any::type_name::(), info.item_type_name()); + assert_eq!(3, info.capacity()); + } else { + panic!("Expected `TypeInfo::Array`"); + } + + let value: &dyn Reflect = &[1usize, 2usize, 3usize]; + let info = value.get_type_info(); + assert!(info.is::()); + + // Map + type MyMap = HashMap; + + let info = MyMap::type_info(); + if let TypeInfo::Map(info) = info { + assert!(info.is::()); + assert!(info.key_is::()); + assert!(info.value_is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + assert_eq!(std::any::type_name::(), info.key_type_name()); + assert_eq!(std::any::type_name::(), info.value_type_name()); + } else { + panic!("Expected `TypeInfo::Map`"); + } + + let value: &dyn Reflect = &MyMap::new(); + let info = value.get_type_info(); + assert!(info.is::()); + + // Value + type MyValue = String; + + let info = MyValue::type_info(); + if let TypeInfo::Value(info) = info { + 
assert!(info.is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + } else { + panic!("Expected `TypeInfo::Value`"); + } + + let value: &dyn Reflect = &String::from("Hello!"); + let info = value.get_type_info(); + assert!(info.is::()); + + // Dynamic + type MyDynamic = DynamicList; + + let info = MyDynamic::type_info(); + if let TypeInfo::Dynamic(info) = info { + assert!(info.is::()); + assert_eq!(std::any::type_name::(), info.type_name()); + } else { + panic!("Expected `TypeInfo::Dynamic`"); + } + + let value: &dyn Reflect = &DynamicList::default(); + let info = value.get_type_info(); + assert!(info.is::()); + } + #[test] fn as_reflect() { trait TestTrait: Reflect {} @@ -489,31 +787,106 @@ mod tests { let _ = trait_object.as_reflect(); } + #[test] + fn should_reflect_debug() { + #[derive(Reflect)] + struct Test { + value: usize, + list: Vec, + array: [f32; 3], + map: HashMap, + a_struct: SomeStruct, + a_tuple_struct: SomeTupleStruct, + custom: CustomDebug, + unknown: Option, + #[reflect(ignore)] + #[allow(dead_code)] + ignored: isize, + } + + #[derive(Reflect)] + struct SomeStruct { + foo: String, + } + + #[derive(Reflect)] + struct SomeTupleStruct(String); + + #[derive(Reflect)] + #[reflect(Debug)] + struct CustomDebug; + impl Debug for CustomDebug { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("Cool debug!") + } + } + + let mut map = HashMap::new(); + map.insert(123, 1.23); + + let test = Test { + value: 123, + list: vec![String::from("A"), String::from("B"), String::from("C")], + array: [1.0, 2.0, 3.0], + map, + a_struct: SomeStruct { + foo: String::from("A Struct!"), + }, + a_tuple_struct: SomeTupleStruct(String::from("A Tuple Struct!")), + custom: CustomDebug, + unknown: Some(String::from("Enums aren't supported yet :(")), + ignored: 321, + }; + + let reflected: &dyn Reflect = &test; + let expected = r#" +bevy_reflect::tests::should_reflect_debug::Test { + value: 123, + list: [ + "A", + "B", + "C", + ], + array: [ + 1.0, + 2.0, + 3.0, + ], + map: { + 123: 1.23, + }, + a_struct: bevy_reflect::tests::should_reflect_debug::SomeStruct { + foo: "A Struct!", + }, + a_tuple_struct: bevy_reflect::tests::should_reflect_debug::SomeTupleStruct( + "A Tuple Struct!", + ), + custom: Cool debug!, + unknown: Reflect(core::option::Option), +}"#; + + assert_eq!(expected, format!("\n{:#?}", reflected)); + } + #[cfg(feature = "glam")] mod glam { use super::*; - use ::serde::Serialize; #[test] fn vec3_serialization() { let v = vec3(12.0, 3.0, -6.9); let mut registry = TypeRegistry::default(); - registry.add_registration(Vec3::get_type_registration()); + registry.register::(); + registry.register::(); let ser = ReflectSerializer::new(&v, ®istry); - let mut dest = vec![]; - let mut serializer = ron::ser::Serializer::new(&mut dest, None, false) - .expect("Failed to acquire serializer"); - - ser.serialize(&mut serializer).expect("Failed to serialize"); - - let result = String::from_utf8(dest).expect("Failed to convert to string"); + let result = ron::to_string(&ser).expect("Failed to serialize to string"); assert_eq!( result, - r#"{"type":"glam::vec3::Vec3","struct":{"x":{"type":"f32","value":12},"y":{"type":"f32","value":3},"z":{"type":"f32","value":-6.9}}}"# + r#"{"type":"glam::vec3::Vec3","struct":{"x":{"type":"f32","value":12.0},"y":{"type":"f32","value":3.0},"z":{"type":"f32","value":-6.9}}}"# ); } diff --git a/crates/bevy_reflect/src/list.rs b/crates/bevy_reflect/src/list.rs index f2927556eadd3..6e86e453be9a1 100644 --- a/crates/bevy_reflect/src/list.rs +++ 
b/crates/bevy_reflect/src/list.rs @@ -1,6 +1,11 @@ -use std::any::Any; +use std::any::{Any, TypeId}; +use std::fmt::{Debug, Formatter}; -use crate::{serde::Serializable, Array, ArrayIter, DynamicArray, Reflect, ReflectMut, ReflectRef}; +use crate::utility::NonGenericTypeInfoCell; +use crate::{ + Array, ArrayIter, DynamicArray, DynamicInfo, FromReflect, Reflect, ReflectMut, ReflectRef, + TypeInfo, Typed, +}; /// An ordered, mutable list of [Reflect] items. This corresponds to types like [`std::vec::Vec`]. /// @@ -19,6 +24,61 @@ pub trait List: Reflect + Array { } } +/// A container for compile-time list info. +#[derive(Clone, Debug)] +pub struct ListInfo { + type_name: &'static str, + type_id: TypeId, + item_type_name: &'static str, + item_type_id: TypeId, +} + +impl ListInfo { + /// Create a new [`ListInfo`]. + pub fn new() -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + item_type_name: std::any::type_name::(), + item_type_id: TypeId::of::(), + } + } + + /// The [type name] of the list. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the list. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the list type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } + + /// The [type name] of the list item. + /// + /// [type name]: std::any::type_name + pub fn item_type_name(&self) -> &'static str { + self.item_type_name + } + + /// The [`TypeId`] of the list item. + pub fn item_type_id(&self) -> TypeId { + self.item_type_id + } + + /// Check if the given type matches the list item type. + pub fn item_is(&self) -> bool { + TypeId::of::() == self.item_type_id + } +} + /// A list of reflected values. #[derive(Default)] pub struct DynamicList { @@ -110,6 +170,11 @@ unsafe impl Reflect for DynamicList { self.name.as_str() } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -164,17 +229,23 @@ unsafe impl Reflect for DynamicList { list_partial_eq(self, value) } - fn serializable(&self) -> Option { - Some(Serializable::Borrowed(self)) + fn debug(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "DynamicList(")?; + list_debug(self, f)?; + write!(f, ")") + } +} + +impl Debug for DynamicList { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.debug(f) } } -impl serde::Serialize for DynamicList { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - crate::array_serialize(self, serializer) +impl Typed for DynamicList { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) } } @@ -239,6 +310,32 @@ pub fn list_partial_eq(a: &L, b: &dyn Reflect) -> Option { Some(true) } +/// The default debug formatter for [`List`] types. 
+/// +/// # Example +/// ``` +/// use bevy_reflect::Reflect; +/// +/// let my_list: &dyn Reflect = &vec![1, 2, 3]; +/// println!("{:#?}", my_list); +/// +/// // Output: +/// +/// // [ +/// // 1, +/// // 2, +/// // 3, +/// // ] +/// ``` +#[inline] +pub fn list_debug(dyn_list: &dyn List, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug = f.debug_list(); + for item in dyn_list.iter() { + debug.entry(&item as &dyn Debug); + } + debug.finish() +} + #[cfg(test)] mod tests { use super::DynamicList; diff --git a/crates/bevy_reflect/src/map.rs b/crates/bevy_reflect/src/map.rs index f67683c1b661a..88f0f63a1c85f 100644 --- a/crates/bevy_reflect/src/map.rs +++ b/crates/bevy_reflect/src/map.rs @@ -1,8 +1,11 @@ -use std::any::Any; +use std::any::{Any, TypeId}; +use std::fmt::{Debug, Formatter}; +use std::hash::Hash; use bevy_utils::{Entry, HashMap}; -use crate::{serde::Serializable, Reflect, ReflectMut, ReflectRef}; +use crate::utility::NonGenericTypeInfoCell; +use crate::{DynamicInfo, Reflect, ReflectMut, ReflectRef, TypeInfo, Typed}; /// An ordered mapping between [`Reflect`] values. /// @@ -43,6 +46,82 @@ pub trait Map: Reflect { fn clone_dynamic(&self) -> DynamicMap; } +/// A container for compile-time map info. +#[derive(Clone, Debug)] +pub struct MapInfo { + type_name: &'static str, + type_id: TypeId, + key_type_name: &'static str, + key_type_id: TypeId, + value_type_name: &'static str, + value_type_id: TypeId, +} + +impl MapInfo { + /// Create a new [`MapInfo`]. + pub fn new() -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + key_type_name: std::any::type_name::(), + key_type_id: TypeId::of::(), + value_type_name: std::any::type_name::(), + value_type_id: TypeId::of::(), + } + } + + /// The [type name] of the map. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the map. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the map type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } + + /// The [type name] of the key. + /// + /// [type name]: std::any::type_name + pub fn key_type_name(&self) -> &'static str { + self.key_type_name + } + + /// The [`TypeId`] of the key. + pub fn key_type_id(&self) -> TypeId { + self.key_type_id + } + + /// Check if the given type matches the key type. + pub fn key_is(&self) -> bool { + TypeId::of::() == self.key_type_id + } + + /// The [type name] of the value. + /// + /// [type name]: std::any::type_name + pub fn value_type_name(&self) -> &'static str { + self.value_type_name + } + + /// The [`TypeId`] of the value. + pub fn value_type_id(&self) -> TypeId { + self.value_type_id + } + + /// Check if the given type matches the value type. + pub fn value_is(&self) -> bool { + TypeId::of::() == self.value_type_id + } +} + const HASH_ERROR: &str = "the given key does not support hashing"; /// An ordered mapping between reflected values. 
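The `ListInfo` and `MapInfo` containers added above expose a collection's element, key, and value types without needing an instance. A minimal sketch of querying them, assuming `Vec<T>` and `bevy_utils::HashMap<K, V>` carry `Typed` impls as the `reflect_type_info` test implies (the concrete `u32`/`String` parameters are illustrative, not from the patch):

```rust
use bevy_reflect::{TypeInfo, Typed};
use bevy_utils::HashMap;

fn inspect_collection_info() {
    // Lists report their item type alongside the list type itself.
    if let TypeInfo::List(info) = <Vec<u32> as Typed>::type_info() {
        assert!(info.item_is::<u32>());
        assert_eq!(std::any::type_name::<u32>(), info.item_type_name());
    } else {
        panic!("Expected `TypeInfo::List`");
    }

    // Maps report both their key and value types.
    if let TypeInfo::Map(info) = <HashMap<u32, String> as Typed>::type_info() {
        assert!(info.key_is::<u32>());
        assert!(info.value_is::<String>());
    } else {
        panic!("Expected `TypeInfo::Map`");
    }
}
```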
@@ -139,6 +218,11 @@ unsafe impl Reflect for DynamicMap { &self.name } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -186,16 +270,27 @@ unsafe impl Reflect for DynamicMap { Box::new(self.clone_dynamic()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { map_partial_eq(self, value) } - fn serializable(&self) -> Option { - None + fn debug(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "DynamicMap(")?; + map_debug(self, f)?; + write!(f, ")") + } +} + +impl Debug for DynamicMap { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.debug(f) + } +} + +impl Typed for DynamicMap { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) } } @@ -263,6 +358,32 @@ pub fn map_partial_eq(a: &M, b: &dyn Reflect) -> Option { Some(true) } +/// The default debug formatter for [`Map`] types. +/// +/// # Example +/// ``` +/// # use bevy_utils::HashMap; +/// use bevy_reflect::Reflect; +/// +/// let mut my_map = HashMap::new(); +/// my_map.insert(123, String::from("Hello")); +/// println!("{:#?}", &my_map as &dyn Reflect); +/// +/// // Output: +/// +/// // { +/// // 123: "Hello", +/// // } +/// ``` +#[inline] +pub fn map_debug(dyn_map: &dyn Map, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug = f.debug_map(); + for (key, value) in dyn_map.iter() { + debug.entry(&key as &dyn Debug, &value as &dyn Debug); + } + debug.finish() +} + #[cfg(test)] mod tests { use super::DynamicMap; diff --git a/crates/bevy_reflect/src/reflect.rs b/crates/bevy_reflect/src/reflect.rs index aeb72669faab9..a0da96c189e5f 100644 --- a/crates/bevy_reflect/src/reflect.rs +++ b/crates/bevy_reflect/src/reflect.rs @@ -1,6 +1,10 @@ -use crate::{serde::Serializable, Array, List, Map, Struct, Tuple, TupleStruct}; +use crate::{ + array_debug, list_debug, map_debug, serde::Serializable, struct_debug, tuple_debug, + tuple_struct_debug, Array, List, Map, Struct, Tuple, TupleStruct, TypeInfo, Typed, ValueInfo, +}; use std::{any::Any, fmt::Debug}; +use crate::utility::NonGenericTypeInfoCell; pub use bevy_utils::AHasher as ReflectHasher; /// An immutable enumeration of "kinds" of reflected type. @@ -53,6 +57,16 @@ pub unsafe trait Reflect: Any + Send + Sync { /// [type name]: std::any::type_name fn type_name(&self) -> &str; + /// Returns the [`TypeInfo`] of the underlying type. + /// + /// This method is great if you have an instance of a type or a `dyn Reflect`, + /// and want to access its [`TypeInfo`]. However, if this method is to be called + /// frequently, consider using [`TypeRegistry::get_type_info`] as it can be more + /// performant for such use cases. + /// + /// [`TypeRegistry::get_type_info`]: crate::TypeRegistry::get_type_info + fn get_type_info(&self) -> &'static TypeInfo; + /// Returns the value as a [`&dyn Any`][std::any::Any]. fn any(&self) -> &dyn Any; @@ -130,17 +144,42 @@ pub unsafe trait Reflect: Any + Send + Sync { /// Returns a hash of the value (which includes the type). /// /// If the underlying type does not support hashing, returns `None`. - fn reflect_hash(&self) -> Option; + fn reflect_hash(&self) -> Option { + None + } /// Returns a "partial equality" comparison result. /// /// If the underlying type does not support equality testing, returns `None`. 
- fn reflect_partial_eq(&self, _value: &dyn Reflect) -> Option; + fn reflect_partial_eq(&self, _value: &dyn Reflect) -> Option { + None + } + + /// Debug formatter for the value. + /// + /// Any value that is not an implementor of other `Reflect` subtraits + /// (e.g. [`List`], [`Map`]), will default to the format: `"Reflect(type_name)"`, + /// where `type_name` is the [type name] of the underlying type. + /// + /// [type name]: Self::type_name + fn debug(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.reflect_ref() { + ReflectRef::Struct(dyn_struct) => struct_debug(dyn_struct, f), + ReflectRef::TupleStruct(dyn_tuple_struct) => tuple_struct_debug(dyn_tuple_struct, f), + ReflectRef::Tuple(dyn_tuple) => tuple_debug(dyn_tuple, f), + ReflectRef::List(dyn_list) => list_debug(dyn_list, f), + ReflectRef::Array(dyn_array) => array_debug(dyn_array, f), + ReflectRef::Map(dyn_map) => map_debug(dyn_map, f), + _ => write!(f, "Reflect({})", self.type_name()), + } + } /// Returns a serializable version of the value. /// /// If the underlying type does not support serialization, returns `None`. - fn serializable(&self) -> Option; + fn serializable(&self) -> Option { + None + } } /// A trait for types which can be constructed from a reflected type. @@ -160,7 +199,14 @@ pub trait FromReflect: Reflect + Sized { impl Debug for dyn Reflect { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Reflect({})", self.type_name()) + self.debug(f) + } +} + +impl Typed for dyn Reflect { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Value(ValueInfo::new::())) } } diff --git a/crates/bevy_reflect/src/serde/ser.rs b/crates/bevy_reflect/src/serde/ser.rs index 180bac1c81069..de0ad7760a6fa 100644 --- a/crates/bevy_reflect/src/serde/ser.rs +++ b/crates/bevy_reflect/src/serde/ser.rs @@ -1,6 +1,6 @@ use crate::{ - serde::type_fields, Array, List, Map, Reflect, ReflectRef, Struct, Tuple, TupleStruct, - TypeRegistry, + serde::type_fields, Array, List, Map, Reflect, ReflectRef, ReflectSerialize, Struct, Tuple, + TupleStruct, TypeRegistry, }; use serde::{ ser::{SerializeMap, SerializeSeq}, @@ -22,13 +22,19 @@ impl<'a> Serializable<'a> { } } -fn get_serializable(reflect_value: &dyn Reflect) -> Result { - reflect_value.serializable().ok_or_else(|| { - serde::ser::Error::custom(format_args!( - "Type '{}' does not support ReflectValue serialization", - reflect_value.type_name() - )) - }) +fn get_serializable<'a, E: serde::ser::Error>( + reflect_value: &'a dyn Reflect, + type_registry: &TypeRegistry, +) -> Result, E> { + let reflect_serialize = type_registry + .get_type_data::(reflect_value.type_id()) + .ok_or_else(|| { + serde::ser::Error::custom(format_args!( + "Type '{}' did not register ReflectSerialize", + reflect_value.type_name() + )) + })?; + Ok(reflect_serialize.get_serializable(reflect_value)) } pub struct ReflectSerializer<'a> { @@ -101,7 +107,7 @@ impl<'a> Serialize for ReflectValueSerializer<'a> { state.serialize_entry(type_fields::TYPE, self.value.type_name())?; state.serialize_entry( type_fields::VALUE, - get_serializable::(self.value)?.borrow(), + get_serializable::(self.value, self.registry)?.borrow(), )?; state.end() } diff --git a/crates/bevy_reflect/src/struct_trait.rs b/crates/bevy_reflect/src/struct_trait.rs index f1941ce1bbac2..ac06e4a5b0469 100644 --- a/crates/bevy_reflect/src/struct_trait.rs +++ b/crates/bevy_reflect/src/struct_trait.rs @@ -1,6 +1,12 @@ -use 
crate::{serde::Serializable, Reflect, ReflectMut, ReflectRef}; +use crate::utility::NonGenericTypeInfoCell; +use crate::{DynamicInfo, NamedField, Reflect, ReflectMut, ReflectRef, TypeInfo, Typed}; use bevy_utils::{Entry, HashMap}; -use std::{any::Any, borrow::Cow}; +use std::fmt::{Debug, Formatter}; +use std::{ + any::{Any, TypeId}, + borrow::Cow, + slice::Iter, +}; /// A reflected Rust regular struct type. /// @@ -60,6 +66,85 @@ pub trait Struct: Reflect { fn clone_dynamic(&self) -> DynamicStruct; } +/// A container for compile-time struct info. +#[derive(Clone, Debug)] +pub struct StructInfo { + type_name: &'static str, + type_id: TypeId, + fields: Box<[NamedField]>, + field_indices: HashMap, usize>, +} + +impl StructInfo { + /// Create a new [`StructInfo`]. + /// + /// # Arguments + /// + /// * `fields`: The fields of this struct in the order they are defined + /// + pub fn new(fields: &[NamedField]) -> Self { + let field_indices = fields + .iter() + .enumerate() + .map(|(index, field)| { + let name = field.name().clone(); + (name, index) + }) + .collect::>(); + + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + fields: fields.to_vec().into_boxed_slice(), + field_indices, + } + } + + /// Get the field with the given name. + pub fn field(&self, name: &str) -> Option<&NamedField> { + self.field_indices + .get(name) + .map(|index| &self.fields[*index]) + } + + /// Get the field at the given index. + pub fn field_at(&self, index: usize) -> Option<&NamedField> { + self.fields.get(index) + } + + /// Get the index of the field with the given name. + pub fn index_of(&self, name: &str) -> Option { + self.field_indices.get(name).copied() + } + + /// Iterate over the fields of this struct. + pub fn iter(&self) -> Iter<'_, NamedField> { + self.fields.iter() + } + + /// The total number of fields in this struct. + pub fn field_len(&self) -> usize { + self.fields.len() + } + + /// The [type name] of the struct. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the struct. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the struct type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} + /// An iterator over the field values of a struct. pub struct FieldIter<'a> { pub(crate) struct_val: &'a dyn Struct, @@ -259,6 +344,11 @@ unsafe impl Reflect for DynamicStruct { &self.name } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -312,16 +402,27 @@ unsafe impl Reflect for DynamicStruct { Ok(()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { struct_partial_eq(self, value) } - fn serializable(&self) -> Option { - None + fn debug(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "DynamicStruct(")?; + struct_debug(self, f)?; + write!(f, ")") + } +} + +impl Debug for DynamicStruct { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.debug(f) + } +} + +impl Typed for DynamicStruct { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) } } @@ -357,3 +458,35 @@ pub fn struct_partial_eq(a: &S, b: &dyn Reflect) -> Option { Some(true) } + +/// The default debug formatter for [`Struct`] types. 
+/// +/// # Example +/// ``` +/// use bevy_reflect::Reflect; +/// #[derive(Reflect)] +/// struct MyStruct { +/// foo: usize +/// } +/// +/// let my_struct: &dyn Reflect = &MyStruct { foo: 123 }; +/// println!("{:#?}", my_struct); +/// +/// // Output: +/// +/// // MyStruct { +/// // foo: 123, +/// // } +/// ``` +#[inline] +pub fn struct_debug(dyn_struct: &dyn Struct, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug = f.debug_struct(dyn_struct.type_name()); + for field_index in 0..dyn_struct.field_len() { + let field = dyn_struct.field_at(field_index).unwrap(); + debug.field( + dyn_struct.name_at(field_index).unwrap(), + &field as &dyn Debug, + ); + } + debug.finish() +} diff --git a/crates/bevy_reflect/src/tuple.rs b/crates/bevy_reflect/src/tuple.rs index e62fc2bc8c8ac..bab117e5e5d6a 100644 --- a/crates/bevy_reflect/src/tuple.rs +++ b/crates/bevy_reflect/src/tuple.rs @@ -1,9 +1,12 @@ +use crate::utility::NonGenericTypeInfoCell; use crate::{ - serde::Serializable, FromReflect, FromType, GetTypeRegistration, Reflect, ReflectDeserialize, - ReflectMut, ReflectRef, TypeRegistration, + DynamicInfo, FromReflect, FromType, GetTypeRegistration, Reflect, ReflectDeserialize, + ReflectMut, ReflectRef, TypeInfo, TypeRegistration, Typed, UnnamedField, }; use serde::Deserialize; -use std::any::Any; +use std::any::{Any, TypeId}; +use std::fmt::{Debug, Formatter}; +use std::slice::Iter; /// A reflected Rust tuple. /// @@ -123,6 +126,62 @@ impl GetTupleField for dyn Tuple { } } +/// A container for compile-time tuple info. +#[derive(Clone, Debug)] +pub struct TupleInfo { + type_name: &'static str, + type_id: TypeId, + fields: Box<[UnnamedField]>, +} + +impl TupleInfo { + /// Create a new [`TupleInfo`]. + /// + /// # Arguments + /// + /// * `fields`: The fields of this tuple in the order they are defined + /// + pub fn new(fields: &[UnnamedField]) -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + fields: fields.to_vec().into_boxed_slice(), + } + } + + /// Get the field at the given index. + pub fn field_at(&self, index: usize) -> Option<&UnnamedField> { + self.fields.get(index) + } + + /// Iterate over the fields of this tuple. + pub fn iter(&self) -> Iter<'_, UnnamedField> { + self.fields.iter() + } + + /// The total number of fields in this tuple. + pub fn field_len(&self) -> usize { + self.fields.len() + } + + /// The [type name] of the tuple. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the tuple. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the tuple type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} + /// A tuple which allows fields to be added at runtime. 
#[derive(Default)] pub struct DynamicTuple { @@ -215,6 +274,11 @@ unsafe impl Reflect for DynamicTuple { self.name() } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -259,16 +323,21 @@ unsafe impl Reflect for DynamicTuple { Ok(()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { tuple_partial_eq(self, value) } - fn serializable(&self) -> Option { - None + fn debug(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "DynamicTuple(")?; + tuple_debug(self, f)?; + write!(f, ")") + } +} + +impl Typed for DynamicTuple { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) } } @@ -318,6 +387,32 @@ pub fn tuple_partial_eq(a: &T, b: &dyn Reflect) -> Option { Some(true) } +/// The default debug formatter for [`Tuple`] types. +/// +/// # Example +/// ``` +/// use bevy_reflect::Reflect; +/// +/// let my_tuple: &dyn Reflect = &(1, 2, 3); +/// println!("{:#?}", my_tuple); +/// +/// // Output: +/// +/// // ( +/// // 1, +/// // 2, +/// // 3, +/// // ) +/// ``` +#[inline] +pub fn tuple_debug(dyn_tuple: &dyn Tuple, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut debug = f.debug_tuple(""); + for field in dyn_tuple.iter_fields() { + debug.field(&field as &dyn Debug); + } + debug.finish() +} + macro_rules! impl_reflect_tuple { {$($index:tt : $name:tt),*} => { impl<$($name: Reflect),*> Tuple for ($($name,)*) { @@ -371,6 +466,10 @@ macro_rules! impl_reflect_tuple { std::any::type_name::() } + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + fn any(&self) -> &dyn Any { self } @@ -408,20 +507,25 @@ macro_rules! impl_reflect_tuple { Box::new(self.clone_dynamic()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { crate::tuple_partial_eq(self, value) } + } - fn serializable(&self) -> Option { - None + impl <$($name: Reflect),*> Typed for ($($name,)*) { + fn type_info() -> &'static TypeInfo { + static CELL: $crate::utility::GenericTypeInfoCell = $crate::utility::GenericTypeInfoCell::new(); + CELL.get_or_insert::(|| { + let fields = [ + $(UnnamedField::new::<$name>($index),)* + ]; + let info = TupleInfo::new::(&fields); + TypeInfo::Tuple(info) + }) } } - impl<$($name: Reflect + for<'de> Deserialize<'de>),*> GetTypeRegistration for ($($name,)*) { + impl<$($name: Reflect + Typed + for<'de> Deserialize<'de>),*> GetTypeRegistration for ($($name,)*) { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::<($($name,)*)>(); registration.insert::(FromType::<($($name,)*)>::from_type()); diff --git a/crates/bevy_reflect/src/tuple_struct.rs b/crates/bevy_reflect/src/tuple_struct.rs index 09f484ca15644..e3a8afa8a8bb6 100644 --- a/crates/bevy_reflect/src/tuple_struct.rs +++ b/crates/bevy_reflect/src/tuple_struct.rs @@ -1,5 +1,8 @@ -use crate::{serde::Serializable, Reflect, ReflectMut, ReflectRef}; -use std::any::Any; +use crate::utility::NonGenericTypeInfoCell; +use crate::{DynamicInfo, Reflect, ReflectMut, ReflectRef, TypeInfo, Typed, UnnamedField}; +use std::any::{Any, TypeId}; +use std::fmt::{Debug, Formatter}; +use std::slice::Iter; /// A reflected Rust tuple struct. 
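Because `impl_reflect_tuple!` now also implements `Typed`, plain tuples report a `TupleInfo` as well. A small sketch along the lines of the `reflect_type_info` test above (the `(u32, f32, String)` tuple is just an illustrative choice):

```rust
use bevy_reflect::{TypeInfo, Typed};

fn inspect_tuple_info() {
    if let TypeInfo::Tuple(info) = <(u32, f32, String) as Typed>::type_info() {
        assert_eq!(3, info.field_len());
        // Tuple fields are unnamed, so they are addressed by index.
        assert_eq!(
            std::any::type_name::<f32>(),
            info.field_at(1).unwrap().type_name()
        );
    } else {
        panic!("Expected `TypeInfo::Tuple`");
    }
}
```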
/// @@ -43,6 +46,62 @@ pub trait TupleStruct: Reflect { fn clone_dynamic(&self) -> DynamicTupleStruct; } +/// A container for compile-time tuple struct info. +#[derive(Clone, Debug)] +pub struct TupleStructInfo { + type_name: &'static str, + type_id: TypeId, + fields: Box<[UnnamedField]>, +} + +impl TupleStructInfo { + /// Create a new [`TupleStructInfo`]. + /// + /// # Arguments + /// + /// * `fields`: The fields of this struct in the order they are defined + /// + pub fn new(fields: &[UnnamedField]) -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + fields: fields.to_vec().into_boxed_slice(), + } + } + + /// Get the field at the given index. + pub fn field_at(&self, index: usize) -> Option<&UnnamedField> { + self.fields.get(index) + } + + /// Iterate over the fields of this struct. + pub fn iter(&self) -> Iter<'_, UnnamedField> { + self.fields.iter() + } + + /// The total number of fields in this struct. + pub fn field_len(&self) -> usize { + self.fields.len() + } + + /// The [type name] of the tuple struct. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the tuple struct. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the tuple struct type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} + /// An iterator over the field values of a tuple struct. pub struct TupleStructFieldIter<'a> { pub(crate) tuple_struct: &'a dyn TupleStruct, @@ -199,6 +258,11 @@ unsafe impl Reflect for DynamicTupleStruct { self.name.as_str() } + #[inline] + fn get_type_info(&self) -> &'static TypeInfo { + ::type_info() + } + #[inline] fn any(&self) -> &dyn Any { self @@ -251,16 +315,27 @@ unsafe impl Reflect for DynamicTupleStruct { Ok(()) } - fn reflect_hash(&self) -> Option { - None - } - fn reflect_partial_eq(&self, value: &dyn Reflect) -> Option { tuple_struct_partial_eq(self, value) } - fn serializable(&self) -> Option { - None + fn debug(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "DynamicTupleStruct(")?; + tuple_struct_debug(self, f)?; + write!(f, ")") + } +} + +impl Debug for DynamicTupleStruct { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.debug(f) + } +} + +impl Typed for DynamicTupleStruct { + fn type_info() -> &'static TypeInfo { + static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); + CELL.get_or_set(|| TypeInfo::Dynamic(DynamicInfo::new::())) } } @@ -294,3 +369,32 @@ pub fn tuple_struct_partial_eq(a: &S, b: &dyn Reflect) -> Option Some(true) } + +/// The default debug formatter for [`TupleStruct`] types. 
+/// +/// # Example +/// ``` +/// use bevy_reflect::Reflect; +/// #[derive(Reflect)] +/// struct MyTupleStruct(usize); +/// +/// let my_tuple_struct: &dyn Reflect = &MyTupleStruct(123); +/// println!("{:#?}", my_tuple_struct); +/// +/// // Output: +/// +/// // MyTupleStruct ( +/// // 123, +/// // ) +/// ``` +#[inline] +pub fn tuple_struct_debug( + dyn_tuple_struct: &dyn TupleStruct, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + let mut debug = f.debug_tuple(dyn_tuple_struct.type_name()); + for field in dyn_tuple_struct.iter_fields() { + debug.field(&field as &dyn Debug); + } + debug.finish() +} diff --git a/crates/bevy_reflect/src/type_info.rs b/crates/bevy_reflect/src/type_info.rs new file mode 100644 index 0000000000000..2d1d1f5327525 --- /dev/null +++ b/crates/bevy_reflect/src/type_info.rs @@ -0,0 +1,223 @@ +use crate::{ArrayInfo, ListInfo, MapInfo, Reflect, StructInfo, TupleInfo, TupleStructInfo}; +use std::any::{Any, TypeId}; + +/// A static accessor to compile-time type information. +/// +/// This trait is automatically implemented by the `#[derive(Reflect)]` macro +/// and allows type information to be processed without an instance of that type. +/// +/// # Implementing +/// +/// While it is recommended to leave implementing this trait to the `#[derive(Reflect)]` macro, +/// it is possible to implement this trait manually. If a manual implementation is needed, +/// you _must_ ensure that the information you provide is correct, otherwise various systems that +/// rely on this trait may fail in unexpected ways. +/// +/// Implementors may have difficulty in generating a reference to [`TypeInfo`] with a static +/// lifetime. Luckily, this crate comes with some [utility] structs, to make generating these +/// statics much simpler. +/// +/// # Example +/// +/// ``` +/// # use std::any::Any; +/// # use bevy_reflect::{NamedField, Reflect, ReflectMut, ReflectRef, StructInfo, TypeInfo, ValueInfo}; +/// # use bevy_reflect::utility::NonGenericTypeInfoCell; +/// use bevy_reflect::Typed; +/// +/// struct MyStruct { +/// foo: usize, +/// bar: (f32, f32) +/// } +/// +/// impl Typed for MyStruct { +/// fn type_info() -> &'static TypeInfo { +/// static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); +/// CELL.get_or_set(|| { +/// let fields = [ +/// NamedField::new::("foo"), +/// NamedField::new::<(f32, f32), _>("bar"), +/// ]; +/// let info = StructInfo::new::(&fields); +/// TypeInfo::Struct(info) +/// }) +/// } +/// } +/// +/// # +/// # unsafe impl Reflect for MyStruct { +/// # fn type_name(&self) -> &str { todo!() } +/// # fn get_type_info(&self) -> &'static TypeInfo { todo!() } +/// # fn any(&self) -> &dyn Any { todo!() } +/// # fn any_mut(&mut self) -> &mut dyn Any { todo!() } +/// # fn as_reflect(&self) -> &dyn Reflect { todo!() } +/// # fn as_reflect_mut(&mut self) -> &mut dyn Reflect { todo!() } +/// # fn apply(&mut self, value: &dyn Reflect) { todo!() } +/// # fn set(&mut self, value: Box) -> Result<(), Box> { todo!() } +/// # fn reflect_ref(&self) -> ReflectRef { todo!() } +/// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } +/// # fn clone_value(&self) -> Box { todo!() } +/// # } +/// ``` +/// +/// [utility]: crate::utility +pub trait Typed: Reflect { + /// Returns the compile-time [info] for the underlying type. + /// + /// [info]: TypeInfo + fn type_info() -> &'static TypeInfo; +} + +/// Compile-time type information for various reflected types. +/// +/// Generally, for any given type, this value can be retrieved one of three ways: +/// +/// 1. 
[`Typed::type_info`] +/// 2. [`Reflect::get_type_info`] +/// 3. [`TypeRegistry::get_type_info`] +/// +/// Each returns a static reference to [`TypeInfo`], but they all have their own use cases. +/// For example, if you know the type at compile time, [`Typed::type_info`] is probably +/// the simplest. If all you have is a `dyn Reflect`, you'll probably want [`Reflect::get_type_info`]. +/// Lastly, if all you have is a [`TypeId`] or [type name], you will need to go through +/// [`TypeRegistry::get_type_info`]. +/// +/// You may also opt to use [`TypeRegistry::get_type_info`] in place of the other methods simply because +/// it can be more performant. This is because those other methods may require acquiring a lock on +/// the static [`TypeInfo`], while the registry simply checks a map. +/// +/// [`Reflect::get_type_info`]: crate::Reflect::get_type_info +/// [`TypeRegistry::get_type_info`]: crate::TypeRegistry::get_type_info +/// [`TypeId`]: std::any::TypeId +/// [type name]: std::any::type_name +#[derive(Debug, Clone)] +pub enum TypeInfo { + Struct(StructInfo), + TupleStruct(TupleStructInfo), + Tuple(TupleInfo), + List(ListInfo), + Array(ArrayInfo), + Map(MapInfo), + Value(ValueInfo), + /// Type information for "dynamic" types whose metadata can't be known at compile-time. + /// + /// This includes structs like [`DynamicStruct`](crate::DynamicStruct) and [`DynamicList`](crate::DynamicList). + Dynamic(DynamicInfo), +} + +impl TypeInfo { + /// The [`TypeId`] of the underlying type. + pub fn type_id(&self) -> TypeId { + match self { + Self::Struct(info) => info.type_id(), + Self::TupleStruct(info) => info.type_id(), + Self::Tuple(info) => info.type_id(), + Self::List(info) => info.type_id(), + Self::Array(info) => info.type_id(), + Self::Map(info) => info.type_id(), + Self::Value(info) => info.type_id(), + Self::Dynamic(info) => info.type_id(), + } + } + + /// The [name] of the underlying type. + /// + /// [name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + match self { + Self::Struct(info) => info.type_name(), + Self::TupleStruct(info) => info.type_name(), + Self::Tuple(info) => info.type_name(), + Self::List(info) => info.type_name(), + Self::Array(info) => info.type_name(), + Self::Map(info) => info.type_name(), + Self::Value(info) => info.type_name(), + Self::Dynamic(info) => info.type_name(), + } + } + + /// Check if the given type matches the underlying type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id() + } +} + +/// A container for compile-time info related to general value types, including primitives. +/// +/// This typically represents a type which cannot be broken down any further. This is often +/// due to technical reasons (or by definition), but it can also be a purposeful choice. +/// +/// For example, [`i32`] cannot be broken down any further, so it is represented by a [`ValueInfo`]. +/// And while [`String`] itself is a struct, its fields are private, so we don't really treat +/// it _as_ a struct. It therefore makes more sense to represent it as a [`ValueInfo`]. +#[derive(Debug, Clone)] +pub struct ValueInfo { + type_name: &'static str, + type_id: TypeId, +} + +impl ValueInfo { + pub fn new() -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + } + } + + /// The [type name] of the value. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the value. 
+ pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the value type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} + +/// A container for compile-time info related to Bevy's _dynamic_ types, including primitives. +/// +/// This is functionally the same as [`ValueInfo`], however, semantically it refers to dynamic +/// types such as [`DynamicStruct`], [`DynamicTuple`], [`DynamicList`], etc. +/// +/// [`DynamicStruct`]: crate::DynamicStruct +/// [`DynamicTuple`]: crate::DynamicTuple +/// [`DynamicList`]: crate::DynamicList +#[derive(Debug, Clone)] +pub struct DynamicInfo { + type_name: &'static str, + type_id: TypeId, +} + +impl DynamicInfo { + pub fn new() -> Self { + Self { + type_name: std::any::type_name::(), + type_id: TypeId::of::(), + } + } + + /// The [type name] of the dynamic value. + /// + /// [type name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_name + } + + /// The [`TypeId`] of the dynamic value. + pub fn type_id(&self) -> TypeId { + self.type_id + } + + /// Check if the given type matches the dynamic value type. + pub fn is(&self) -> bool { + TypeId::of::() == self.type_id + } +} diff --git a/crates/bevy_reflect/src/type_registry.rs b/crates/bevy_reflect/src/type_registry.rs index 15e20027255b2..f793ab6f8fca4 100644 --- a/crates/bevy_reflect/src/type_registry.rs +++ b/crates/bevy_reflect/src/type_registry.rs @@ -1,4 +1,5 @@ -use crate::Reflect; +use crate::serde::Serializable; +use crate::{Reflect, TypeInfo, Typed}; use bevy_utils::{HashMap, HashSet}; use downcast_rs::{impl_downcast, Downcast}; use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -6,7 +7,6 @@ use serde::Deserialize; use std::{any::TypeId, fmt::Debug, sync::Arc}; /// A registry of reflected types. -#[derive(Default)] pub struct TypeRegistry { registrations: HashMap, short_name_to_id: HashMap, @@ -35,7 +35,44 @@ pub trait GetTypeRegistration { fn get_type_registration() -> TypeRegistration; } +impl Default for TypeRegistry { + fn default() -> Self { + Self::new() + } +} + impl TypeRegistry { + /// Create a type registry with *no* registered types. + pub fn empty() -> Self { + Self { + registrations: Default::default(), + short_name_to_id: Default::default(), + full_name_to_id: Default::default(), + ambiguous_names: Default::default(), + } + } + + /// Create a type registry with default registrations for primitive types. + pub fn new() -> Self { + let mut registry = Self::empty(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry.register::(); + registry + } + /// Registers the type `T`. 
pub fn register(&mut self) where @@ -55,12 +92,12 @@ impl TypeRegistry { self.ambiguous_names.insert(short_name); } else { self.short_name_to_id - .insert(short_name, registration.type_id); + .insert(short_name, registration.type_id()); } self.full_name_to_id - .insert(registration.name.to_string(), registration.type_id); + .insert(registration.type_name().to_string(), registration.type_id()); self.registrations - .insert(registration.type_id, registration); + .insert(registration.type_id(), registration); } /// Returns a reference to the [`TypeRegistration`] of the type with the @@ -152,6 +189,14 @@ impl TypeRegistry { .and_then(|registration| registration.data_mut::()) } + /// Returns the [`TypeInfo`] associated with the given `TypeId`. + /// + /// If the specified type has not been registered, returns `None`. + pub fn get_type_info(&self, type_id: TypeId) -> Option<&'static TypeInfo> { + self.get(type_id) + .map(|registration| registration.type_info()) + } + /// Returns an iterator over the [`TypeRegistration`]s of the registered /// types. pub fn iter(&self) -> impl Iterator { @@ -179,23 +224,21 @@ impl TypeRegistryArc { /// A record of data about a type. /// -/// This contains the [`TypeId`], [name], and [short name] of the type. +/// This contains the [`TypeInfo`] of the type, as well as its [short name]. /// /// For each trait specified by the [`#[reflect(_)]`][0] attribute of /// [`#[derive(Reflect)]`][1] on the registered type, this record also contains /// a [`TypeData`] which can be used to downcast [`Reflect`] trait objects of /// this type to trait objects of the relevant trait. /// -/// [`TypeId`]: std::any::TypeId -/// [name]: std::any::type_name /// [short name]: TypeRegistration::get_short_name +/// [`TypeInfo`]: crate::TypeInfo /// [0]: crate::Reflect /// [1]: crate::Reflect pub struct TypeRegistration { - type_id: TypeId, short_name: String, - name: &'static str, data: HashMap>, + type_info: &'static TypeInfo, } impl TypeRegistration { @@ -204,7 +247,7 @@ impl TypeRegistration { /// [`TypeId`]: std::any::TypeId #[inline] pub fn type_id(&self) -> TypeId { - self.type_id + self.type_info.type_id() } /// Returns a reference to the value of type `T` in this registration's type @@ -227,6 +270,11 @@ impl TypeRegistration { .and_then(|value| value.downcast_mut()) } + /// Returns a reference to the registration's [`TypeInfo`] + pub fn type_info(&self) -> &'static TypeInfo { + self.type_info + } + /// Inserts an instance of `T` into this registration's type data. /// /// If another instance of `T` was previously inserted, it is replaced. @@ -235,14 +283,12 @@ impl TypeRegistration { } /// Creates type registration information for `T`. - pub fn of() -> Self { - let ty = TypeId::of::(); + pub fn of() -> Self { let type_name = std::any::type_name::(); Self { - type_id: ty, data: HashMap::default(), - name: type_name, short_name: Self::get_short_name(type_name), + type_info: T::type_info(), } } @@ -253,9 +299,11 @@ impl TypeRegistration { &self.short_name } - /// Returns the name of the type. - pub fn name(&self) -> &'static str { - self.name + /// Returns the [name] of the type. + /// + /// [name]: std::any::type_name + pub fn type_name(&self) -> &'static str { + self.type_info.type_name() } /// Calculates the short name of a type. 
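`TypeRegistration` is now built around `Typed`, so a registration carries its `TypeInfo` and the registry can answer type-info queries by `TypeId`. A minimal sketch of the new flow, using a hypothetical `Health` struct that is not part of this patch:

```rust
use bevy_reflect::{Reflect, TypeInfo, TypeRegistry};
use std::any::TypeId;

// Hypothetical reflected type used only for illustration.
#[derive(Reflect)]
struct Health {
    value: f32,
}

fn registry_type_info() {
    // `TypeRegistry::default()` now pre-registers primitives; use `empty()` to opt out.
    let mut registry = TypeRegistry::default();
    registry.register::<Health>();

    // A registration now exposes its type name and `TypeInfo` directly.
    let registration = registry.get(TypeId::of::<Health>()).unwrap();
    assert_eq!(std::any::type_name::<Health>(), registration.type_name());

    // Type info can also be looked up by `TypeId` through the registry.
    if let Some(TypeInfo::Struct(info)) = registry.get_type_info(TypeId::of::<Health>()) {
        assert_eq!(
            std::any::type_name::<f32>(),
            info.field("value").unwrap().type_name()
        );
    } else {
        panic!("Expected `TypeInfo::Struct`");
    }
}
```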
@@ -311,9 +359,8 @@ impl Clone for TypeRegistration { TypeRegistration { data, - name: self.name, short_name: self.short_name.clone(), - type_id: self.type_id, + type_info: self.type_info, } } } @@ -343,13 +390,41 @@ pub trait FromType { fn from_type() -> Self; } +/// A struct used to serialize reflected instances of a type. +/// +/// A `ReflectSerialize` for type `T` can be obtained via +/// [`FromType::from_type`]. +#[derive(Clone)] +pub struct ReflectSerialize { + get_serializable: for<'a> fn(value: &'a dyn Reflect) -> Serializable, +} + +impl FromType for ReflectSerialize { + fn from_type() -> Self { + ReflectSerialize { + get_serializable: |value| { + let value = value.downcast_ref::().unwrap_or_else(|| { + panic!("ReflectSerialize::get_serialize called with type `{}`, even though it was created for `{}`", value.type_name(), std::any::type_name::()) + }); + Serializable::Borrowed(value) + }, + } + } +} + +impl ReflectSerialize { + /// Turn the value into a serializable representation + pub fn get_serializable<'a>(&self, value: &'a dyn Reflect) -> Serializable<'a> { + (self.get_serializable)(value) + } +} + /// A struct used to deserialize reflected instances of a type. /// /// A `ReflectDeserialize` for type `T` can be obtained via /// [`FromType::from_type`]. #[derive(Clone)] pub struct ReflectDeserialize { - #[allow(clippy::type_complexity)] pub func: fn( deserializer: &mut dyn erased_serde::Deserializer, ) -> Result, erased_serde::Error>, diff --git a/crates/bevy_reflect/src/utility.rs b/crates/bevy_reflect/src/utility.rs new file mode 100644 index 0000000000000..a597539afdb02 --- /dev/null +++ b/crates/bevy_reflect/src/utility.rs @@ -0,0 +1,142 @@ +//! Helpers for working with Bevy reflection. + +use crate::TypeInfo; +use bevy_utils::HashMap; +use once_cell::race::OnceBox; +use parking_lot::RwLock; +use std::any::{Any, TypeId}; + +/// A container for [`TypeInfo`] over non-generic types, allowing instances to be stored statically. +/// +/// This is specifically meant for use with _non_-generic types. If your type _is_ generic, +/// then use [`GenericTypeInfoCell`] instead. Otherwise, it will not take into account all +/// monomorphizations of your type. 
+/// +/// ## Example +/// +/// ``` +/// # use std::any::Any; +/// # use bevy_reflect::{NamedField, Reflect, ReflectMut, ReflectRef, StructInfo, Typed, TypeInfo}; +/// use bevy_reflect::utility::NonGenericTypeInfoCell; +/// +/// struct Foo { +/// bar: i32 +/// } +/// +/// impl Typed for Foo { +/// fn type_info() -> &'static TypeInfo { +/// static CELL: NonGenericTypeInfoCell = NonGenericTypeInfoCell::new(); +/// CELL.get_or_set(|| { +/// let fields = [NamedField::new::("bar")]; +/// let info = StructInfo::new::(&fields); +/// TypeInfo::Struct(info) +/// }) +/// } +/// } +/// # +/// # unsafe impl Reflect for Foo { +/// # fn type_name(&self) -> &str { todo!() } +/// # fn get_type_info(&self) -> &'static TypeInfo { todo!() } +/// # fn any(&self) -> &dyn Any { todo!() } +/// # fn any_mut(&mut self) -> &mut dyn Any { todo!() } +/// # fn as_reflect(&self) -> &dyn Reflect { todo!() } +/// # fn as_reflect_mut(&mut self) -> &mut dyn Reflect { todo!() } +/// # fn apply(&mut self, value: &dyn Reflect) { todo!() } +/// # fn set(&mut self, value: Box) -> Result<(), Box> { todo!() } +/// # fn reflect_ref(&self) -> ReflectRef { todo!() } +/// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } +/// # fn clone_value(&self) -> Box { todo!() } +/// # } +/// ``` +pub struct NonGenericTypeInfoCell(OnceBox); + +impl NonGenericTypeInfoCell { + /// Initialize a [`NonGenericTypeInfoCell`] for non-generic types. + pub const fn new() -> Self { + Self(OnceBox::new()) + } + + /// Returns a reference to the [`TypeInfo`] stored in the cell. + /// + /// If there is no [`TypeInfo`] found, a new one will be generated from the given function. + /// + /// [`TypeInfos`]: TypeInfo + pub fn get_or_set(&self, f: F) -> &TypeInfo + where + F: FnOnce() -> TypeInfo, + { + self.0.get_or_init(|| Box::new(f())) + } +} + +/// A container for [`TypeInfo`] over generic types, allowing instances to be stored statically. +/// +/// This is specifically meant for use with generic types. If your type isn't generic, +/// then use [`NonGenericTypeInfoCell`] instead as it should be much more performant. +/// +/// ## Example +/// +/// ``` +/// # use std::any::Any; +/// # use bevy_reflect::{Reflect, ReflectMut, ReflectRef, TupleStructInfo, Typed, TypeInfo, UnnamedField}; +/// use bevy_reflect::utility::GenericTypeInfoCell; +/// +/// struct Foo(T); +/// +/// impl Typed for Foo { +/// fn type_info() -> &'static TypeInfo { +/// static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); +/// CELL.get_or_insert::(|| { +/// let fields = [UnnamedField::new::(0)]; +/// let info = TupleStructInfo::new::(&fields); +/// TypeInfo::TupleStruct(info) +/// }) +/// } +/// } +/// # +/// # unsafe impl Reflect for Foo { +/// # fn type_name(&self) -> &str { todo!() } +/// # fn get_type_info(&self) -> &'static TypeInfo { todo!() } +/// # fn any(&self) -> &dyn Any { todo!() } +/// # fn any_mut(&mut self) -> &mut dyn Any { todo!() } +/// # fn as_reflect(&self) -> &dyn Reflect { todo!() } +/// # fn as_reflect_mut(&mut self) -> &mut dyn Reflect { todo!() } +/// # fn apply(&mut self, value: &dyn Reflect) { todo!() } +/// # fn set(&mut self, value: Box) -> Result<(), Box> { todo!() } +/// # fn reflect_ref(&self) -> ReflectRef { todo!() } +/// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } +/// # fn clone_value(&self) -> Box { todo!() } +/// # } +/// ``` +pub struct GenericTypeInfoCell(OnceBox>>); + +impl GenericTypeInfoCell { + /// Initialize a [`GenericTypeInfoCell`] for generic types. 
+ pub const fn new() -> Self { + Self(OnceBox::new()) + } + + /// Returns a reference to the [`TypeInfo`] stored in the cell. + /// + /// This method will then return the correct [`TypeInfo`] reference for the given type `T`. + /// If there is no [`TypeInfo`] found, a new one will be generated from the given function. + pub fn get_or_insert(&self, f: F) -> &TypeInfo + where + T: Any + ?Sized, + F: FnOnce() -> TypeInfo, + { + let type_id = TypeId::of::(); + let mapping = self.0.get_or_init(|| Box::new(RwLock::default())); + if let Some(info) = mapping.read().get(&type_id) { + return info; + } + + mapping.write().entry(type_id).or_insert_with(|| { + // We leak here in order to obtain a `&'static` reference. + // Otherwise, we won't be able to return a reference due to the `RwLock`. + // This should be okay, though, since we expect it to remain statically + // available over the course of the application. + Box::leak(Box::new(f())) + }) + } +} diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index df6230dca6d07..879dbbbf9af9d 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -31,17 +31,19 @@ webgl = ["wgpu/webgl"] bevy_app = { path = "../bevy_app", version = "0.8.0-dev" } bevy_asset = { path = "../bevy_asset", version = "0.8.0-dev" } bevy_core = { path = "../bevy_core", version = "0.8.0-dev" } -bevy_crevice = { path = "../bevy_crevice", version = "0.8.0-dev", features = ["glam"] } bevy_derive = { path = "../bevy_derive", version = "0.8.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } +bevy_encase_derive = { path = "../bevy_encase_derive", version = "0.8.0-dev" } bevy_math = { path = "../bevy_math", version = "0.8.0-dev" } +bevy_mikktspace = { path = "../bevy_mikktspace", version = "0.8.0-dev" } bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["bevy"] } +bevy_render_macros = { path = "macros", version = "0.8.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.8.0-dev" } bevy_window = { path = "../bevy_window", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } # rendering -image = { version = "0.23.12", default-features = false } +image = { version = "0.24", default-features = false } # misc wgpu = { version = "0.12.0", features = ["spirv"] } @@ -52,6 +54,7 @@ bitflags = "1.2.1" smallvec = { version = "1.6", features = ["union", "const_generics"] } once_cell = "1.4.1" # TODO: replace once_cell with std equivalent if/when this lands: https://github.com/rust-lang/rfcs/pull/2788 downcast-rs = "1.2.0" +thread_local = "1.1" thiserror = "1.0" futures-lite = "1.4.0" anyhow = "1.0" @@ -67,3 +70,4 @@ flate2 = { version = "1.0.22", optional = true } ruzstd = { version = "0.2.4", optional = true } # For transcoding of UASTC/ETC1S universal formats, and for .basis file support basis-universal = { version = "0.2.0", optional = true } +encase = { version = "0.2", features = ["glam"] } diff --git a/crates/bevy_render/macros/Cargo.toml b/crates/bevy_render/macros/Cargo.toml new file mode 100644 index 0000000000000..90dd1b4fae801 --- /dev/null +++ b/crates/bevy_render/macros/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "bevy_render_macros" +version = "0.8.0-dev" +edition = "2021" +description = "Derive implementations for bevy_render" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + +[lib] +proc-macro = true + +[dependencies] +bevy_macro_utils = { path = 
"../../bevy_macro_utils", version = "0.8.0-dev" } + +syn = "1.0" +proc-macro2 = "1.0" +quote = "1.0" diff --git a/crates/bevy_render/macros/src/extract_resource.rs b/crates/bevy_render/macros/src/extract_resource.rs new file mode 100644 index 0000000000000..0a35eb4d5dd2b --- /dev/null +++ b/crates/bevy_render/macros/src/extract_resource.rs @@ -0,0 +1,26 @@ +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, parse_quote, DeriveInput, Path}; + +pub fn derive_extract_resource(input: TokenStream) -> TokenStream { + let mut ast = parse_macro_input!(input as DeriveInput); + let bevy_render_path: Path = crate::bevy_render_path(); + + ast.generics + .make_where_clause() + .predicates + .push(parse_quote! { Self: Clone }); + + let struct_name = &ast.ident; + let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); + + TokenStream::from(quote! { + impl #impl_generics #bevy_render_path::extract_resource::ExtractResource for #struct_name #type_generics #where_clause { + type Source = Self; + + fn extract_resource(source: &Self::Source) -> Self { + source.clone() + } + } + }) +} diff --git a/crates/bevy_render/macros/src/lib.rs b/crates/bevy_render/macros/src/lib.rs new file mode 100644 index 0000000000000..0079780b8e027 --- /dev/null +++ b/crates/bevy_render/macros/src/lib.rs @@ -0,0 +1,16 @@ +mod extract_resource; + +use bevy_macro_utils::BevyManifest; +use proc_macro::TokenStream; + +pub(crate) fn bevy_render_path() -> syn::Path { + BevyManifest::default() + .maybe_get_path("bevy_render") + // NOTE: If the derivation is within bevy_render, then we need to return 'crate' + .unwrap_or_else(|| BevyManifest::parse_str("crate")) +} + +#[proc_macro_derive(ExtractResource)] +pub fn derive_extract_resource(input: TokenStream) -> TokenStream { + extract_resource::derive_extract_resource(input) +} diff --git a/crates/bevy_render/src/camera/bundle.rs b/crates/bevy_render/src/camera/bundle.rs deleted file mode 100644 index 39bda7802bbd8..0000000000000 --- a/crates/bevy_render/src/camera/bundle.rs +++ /dev/null @@ -1,164 +0,0 @@ -use super::{CameraProjection, ScalingMode}; -use crate::{ - camera::{Camera, DepthCalculation, OrthographicProjection, PerspectiveProjection}, - primitives::Frustum, - view::VisibleEntities, -}; -use bevy_ecs::reflect::ReflectComponent; -use bevy_ecs::{bundle::Bundle, prelude::Component}; -use bevy_math::Vec3; -use bevy_reflect::Reflect; -use bevy_transform::components::{GlobalTransform, Transform}; - -#[derive(Component, Default, Reflect)] -#[reflect(Component)] -pub struct Camera3d; - -#[derive(Component, Default, Reflect)] -#[reflect(Component)] -pub struct Camera2d; - -/// Component bundle for camera entities with perspective projection -/// -/// Use this for 3D rendering. 
-#[derive(Bundle)] -pub struct PerspectiveCameraBundle { - pub camera: Camera, - pub perspective_projection: PerspectiveProjection, - pub visible_entities: VisibleEntities, - pub frustum: Frustum, - pub transform: Transform, - pub global_transform: GlobalTransform, - pub marker: M, -} - -impl Default for PerspectiveCameraBundle { - fn default() -> Self { - PerspectiveCameraBundle::new_3d() - } -} - -impl PerspectiveCameraBundle { - pub fn new_3d() -> Self { - PerspectiveCameraBundle::new() - } -} - -impl PerspectiveCameraBundle { - pub fn new() -> Self { - let perspective_projection = PerspectiveProjection::default(); - let view_projection = perspective_projection.get_projection_matrix(); - let frustum = Frustum::from_view_projection( - &view_projection, - &Vec3::ZERO, - &Vec3::Z, - perspective_projection.far(), - ); - PerspectiveCameraBundle { - camera: Camera::default(), - perspective_projection, - visible_entities: VisibleEntities::default(), - frustum, - transform: Default::default(), - global_transform: Default::default(), - marker: M::default(), - } - } -} - -/// Component bundle for camera entities with orthographic projection -/// -/// Use this for 2D games, isometric games, CAD-like 3D views. -#[derive(Bundle)] -pub struct OrthographicCameraBundle { - pub camera: Camera, - pub orthographic_projection: OrthographicProjection, - pub visible_entities: VisibleEntities, - pub frustum: Frustum, - pub transform: Transform, - pub global_transform: GlobalTransform, - pub marker: M, -} - -impl OrthographicCameraBundle { - pub fn new_3d() -> Self { - let orthographic_projection = OrthographicProjection { - scaling_mode: ScalingMode::FixedVertical, - depth_calculation: DepthCalculation::Distance, - ..Default::default() - }; - let view_projection = orthographic_projection.get_projection_matrix(); - let frustum = Frustum::from_view_projection( - &view_projection, - &Vec3::ZERO, - &Vec3::Z, - orthographic_projection.far(), - ); - OrthographicCameraBundle { - camera: Camera::default(), - orthographic_projection, - visible_entities: VisibleEntities::default(), - frustum, - transform: Default::default(), - global_transform: Default::default(), - marker: Camera3d, - } - } -} - -impl OrthographicCameraBundle { - /// Create an orthographic projection camera to render 2D content. - /// - /// The projection creates a camera space where X points to the right of the screen, - /// Y points to the top of the screen, and Z points out of the screen (backward), - /// forming a right-handed coordinate system. The center of the screen is at `X=0` and - /// `Y=0`. - /// - /// The default scaling mode is [`ScalingMode::WindowSize`], resulting in a resolution - /// where 1 unit in X and Y in camera space corresponds to 1 logical pixel on the screen. - /// That is, for a screen of 1920 pixels in width, the X coordinates visible on screen go - /// from `X=-960` to `X=+960` in world space, left to right. This can be changed by changing - /// the [`OrthographicProjection::scaling_mode`] field. - /// - /// The camera is placed at `Z=+1000-0.1`, looking toward the world origin `(0,0,0)`. - /// Its orthographic projection extends from `0.0` to `-1000.0` in camera view space, - /// corresponding to `Z=+999.9` (closest to camera) to `Z=-0.1` (furthest away from - /// camera) in world space. - pub fn new_2d() -> Self { - Self::new_2d_with_far(1000.0) - } - - /// Create an orthographic projection camera with a custom Z position. - /// - /// The camera is placed at `Z=far-0.1`, looking toward the world origin `(0,0,0)`. 
- /// Its orthographic projection extends from `0.0` to `-far` in camera view space, - /// corresponding to `Z=far-0.1` (closest to camera) to `Z=-0.1` (furthest away from - /// camera) in world space. - pub fn new_2d_with_far(far: f32) -> Self { - // we want 0 to be "closest" and +far to be "farthest" in 2d, so we offset - // the camera's translation by far and use a right handed coordinate system - let orthographic_projection = OrthographicProjection { - far, - depth_calculation: DepthCalculation::ZDifference, - ..Default::default() - }; - let transform = Transform::from_xyz(0.0, 0.0, far - 0.1); - let view_projection = - orthographic_projection.get_projection_matrix() * transform.compute_matrix().inverse(); - let frustum = Frustum::from_view_projection( - &view_projection, - &transform.translation, - &transform.back(), - orthographic_projection.far(), - ); - OrthographicCameraBundle { - camera: Camera::default(), - orthographic_projection, - visible_entities: VisibleEntities::default(), - frustum, - transform, - global_transform: Default::default(), - marker: Camera2d, - } - } -} diff --git a/crates/bevy_render/src/camera/camera.rs b/crates/bevy_render/src/camera/camera.rs index 852f3553649fe..06f8ebf818f7e 100644 --- a/crates/bevy_render/src/camera/camera.rs +++ b/crates/bevy_render/src/camera/camera.rs @@ -1,24 +1,20 @@ -use std::marker::PhantomData; - use crate::{ camera::CameraProjection, prelude::Image, render_asset::RenderAssets, render_resource::TextureView, view::{ExtractedView, ExtractedWindows, VisibleEntities}, - RenderApp, RenderStage, }; -use bevy_app::{App, CoreStage, Plugin, StartupStage}; use bevy_asset::{AssetEvent, Assets, Handle}; +use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ change_detection::DetectChanges, component::Component, entity::Entity, event::EventReader, - prelude::With, query::Added, reflect::ReflectComponent, - system::{Commands, ParamSet, Query, Res, ResMut}, + system::{Commands, ParamSet, Query, Res}, }; use bevy_math::{Mat4, UVec2, Vec2, Vec3}; use bevy_reflect::prelude::*; @@ -26,19 +22,226 @@ use bevy_transform::components::GlobalTransform; use bevy_utils::HashSet; use bevy_window::{WindowCreated, WindowId, WindowResized, Windows}; use serde::{Deserialize, Serialize}; +use std::{borrow::Cow, ops::Range}; use wgpu::Extent3d; -#[derive(Component, Default, Debug, Reflect, Clone)] -#[reflect(Component, Default)] +/// Render viewport configuration for the [`Camera`] component. +/// +/// The viewport defines the area on the render target to which the camera renders its image. +/// You can overlay multiple cameras in a single window using viewports to create effects like +/// split screen, minimaps, and character viewers. +// TODO: remove reflect_value when possible +#[derive(Reflect, Debug, Clone, Serialize, Deserialize)] +#[reflect_value(Default, Serialize, Deserialize)] +pub struct Viewport { + /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`]. + /// (0,0) corresponds to the top-left corner + pub physical_position: UVec2, + /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`]. + /// The origin of the rectangle is in the top-left corner. + pub physical_size: UVec2, + /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0). 
+ pub depth: Range, +} + +impl Default for Viewport { + fn default() -> Self { + Self { + physical_position: Default::default(), + physical_size: Default::default(), + depth: 0.0..1.0, + } + } +} + +/// Information about the current [`RenderTarget`]. +#[derive(Default, Debug, Clone)] +pub struct RenderTargetInfo { + /// The physical size of this render target (ignores scale factor). + pub physical_size: UVec2, + /// The scale factor of this render target. + pub scale_factor: f64, +} + +/// Holds internally computed [`Camera`] values. +#[derive(Default, Debug, Clone)] +pub struct ComputedCameraValues { + projection_matrix: Mat4, + target_info: Option, +} + +#[derive(Component, Debug, Reflect, Clone)] +#[reflect(Component)] pub struct Camera { - pub projection_matrix: Mat4, + /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`]. + pub viewport: Option, + /// Cameras with a lower priority will be rendered before cameras with a higher priority. + pub priority: isize, + /// If this is set to true, this camera will be rendered to its specified [`RenderTarget`]. If false, this + /// camera will not be rendered. + pub is_active: bool, + /// The method used to calculate this camera's depth. This will be used for projections and visibility. + pub depth_calculation: DepthCalculation, + /// Computed values for this camera, such as the projection matrix and the render target size. #[reflect(ignore)] - pub target: RenderTarget, + pub computed: ComputedCameraValues, + /// The "target" that this camera will render to. #[reflect(ignore)] - pub depth_calculation: DepthCalculation, + pub target: RenderTarget, } -#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash)] +impl Default for Camera { + fn default() -> Self { + Self { + is_active: true, + priority: 0, + viewport: None, + computed: Default::default(), + target: Default::default(), + depth_calculation: Default::default(), + } + } +} + +impl Camera { + /// Converts a physical size in this `Camera` to a logical size. + #[inline] + pub fn to_logical(&self, physical_size: UVec2) -> Option { + let scale = self.computed.target_info.as_ref()?.scale_factor; + Some((physical_size.as_dvec2() / scale).as_vec2()) + } + + /// The rendered physical bounds (minimum, maximum) of the camera. If the `viewport` field is + /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to + /// the full physical rect of the current [`RenderTarget`]. + #[inline] + pub fn physical_viewport_rect(&self) -> Option<(UVec2, UVec2)> { + let min = self + .viewport + .as_ref() + .map(|v| v.physical_position) + .unwrap_or(UVec2::ZERO); + let max = min + self.physical_viewport_size()?; + Some((min, max)) + } + + /// The rendered logical bounds (minimum, maximum) of the camera. If the `viewport` field is set + /// to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the + /// full logical rect of the current [`RenderTarget`]. + #[inline] + pub fn logical_viewport_rect(&self) -> Option<(Vec2, Vec2)> { + let (min, max) = self.physical_viewport_rect()?; + Some((self.to_logical(min)?, self.to_logical(max)?)) + } + + /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this + /// will be the size of that custom viewport. Otherwise it will default to the full logical size + /// of the current [`RenderTarget`]. + /// For logic that requires the full logical size of the + /// [`RenderTarget`], prefer [`Camera::logical_target_size`]. 
+ #[inline] + pub fn logical_viewport_size(&self) -> Option { + self.viewport + .as_ref() + .and_then(|v| self.to_logical(v.physical_size)) + .or_else(|| self.logical_target_size()) + } + + /// The physical size of this camera's viewport. If the `viewport` field is set to [`Some`], this + /// will be the size of that custom viewport. Otherwise it will default to the full physical size of + /// the current [`RenderTarget`]. + /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`]. + #[inline] + pub fn physical_viewport_size(&self) -> Option { + self.viewport + .as_ref() + .map(|v| v.physical_size) + .or_else(|| self.physical_target_size()) + } + + /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration. + /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area. + /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`]. + #[inline] + pub fn logical_target_size(&self) -> Option { + self.computed + .target_info + .as_ref() + .and_then(|t| self.to_logical(t.physical_size)) + } + + /// The full physical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration. + /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area. + /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`]. + #[inline] + pub fn physical_target_size(&self) -> Option { + self.computed.target_info.as_ref().map(|t| t.physical_size) + } + + /// The projection matrix computed using this camera's [`CameraProjection`]. + #[inline] + pub fn projection_matrix(&self) -> Mat4 { + self.computed.projection_matrix + } + + /// Given a position in world space, use the camera to compute the viewport-space coordinates. + /// + /// To get the coordinates in Normalized Device Coordinates, you should use + /// [`world_to_ndc`](Self::world_to_ndc). + pub fn world_to_viewport( + &self, + camera_transform: &GlobalTransform, + world_position: Vec3, + ) -> Option { + let target_size = self.logical_viewport_size()?; + let ndc_space_coords = self.world_to_ndc(camera_transform, world_position)?; + // NDC z-values outside of 0 < z < 1 are outside the camera frustum and are thus not in viewport-space + if ndc_space_coords.z < 0.0 || ndc_space_coords.z > 1.0 { + return None; + } + + // Once in NDC space, we can discard the z element and rescale x/y to fit the screen + Some((ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size) + } + + /// Given a position in world space, use the camera's viewport to compute the Normalized Device Coordinates. + /// + /// Values returned will be between -1.0 and 1.0 when the position is within the viewport. + /// To get the coordinates in the render target's viewport dimensions, you should use + /// [`world_to_viewport`](Self::world_to_viewport). 
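// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): configuring the new `Camera`
// component with an explicit `Viewport`, using only the fields introduced
// above. How the component is spawned (which bundle it belongs to) lives in
// other crates and is assumed here.
use bevy_math::UVec2;

fn right_half_camera() -> Camera {
    Camera {
        viewport: Some(Viewport {
            // Top-left corner of the viewport rectangle, in physical pixels.
            physical_position: UVec2::new(640, 0),
            // Draw only to the right half of a 1280x720 window.
            physical_size: UVec2::new(640, 720),
            // Keep the default 0.0..1.0 depth range.
            ..Default::default()
        }),
        // Rendered after (and therefore on top of) cameras with a lower priority.
        priority: 1,
        ..Default::default()
    }
}
// ---------------------------------------------------------------------------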
+ pub fn world_to_ndc( + &self, + camera_transform: &GlobalTransform, + world_position: Vec3, + ) -> Option { + // Build a transform to convert from world to NDC using camera data + let world_to_ndc: Mat4 = + self.computed.projection_matrix * camera_transform.compute_matrix().inverse(); + let ndc_space_coords: Vec3 = world_to_ndc.project_point3(world_position); + + if !ndc_space_coords.is_nan() { + Some(ndc_space_coords) + } else { + None + } + } +} + +/// Configures the [`RenderGraph`](crate::render_graph::RenderGraph) name assigned to be run for a given [`Camera`] entity. +#[derive(Component, Deref, DerefMut, Reflect, Default)] +#[reflect(Component)] +pub struct CameraRenderGraph(Cow<'static, str>); + +impl CameraRenderGraph { + #[inline] + pub fn new>>(name: T) -> Self { + Self(name.into()) + } +} + +/// The "target" that a [`Camera`] will render to. For example, this could be a [`Window`](bevy_window::Window) +/// swapchain or an [`Image`]. +#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum RenderTarget { /// Window to which the camera's view is rendered. Window(WindowId), @@ -67,28 +270,29 @@ impl RenderTarget { } } } - pub fn get_physical_size(&self, windows: &Windows, images: &Assets) -> Option { - match self { - RenderTarget::Window(window_id) => windows - .get(*window_id) - .map(|window| UVec2::new(window.physical_width(), window.physical_height())), - RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| { - let Extent3d { width, height, .. } = image.texture_descriptor.size; - UVec2::new(width, height) - }), - } - .filter(|size| size.x > 0 && size.y > 0) - } - pub fn get_logical_size(&self, windows: &Windows, images: &Assets) -> Option { - match self { - RenderTarget::Window(window_id) => windows - .get(*window_id) - .map(|window| Vec2::new(window.width(), window.height())), - RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| { + + pub fn get_render_target_info( + &self, + windows: &Windows, + images: &Assets, + ) -> Option { + Some(match self { + RenderTarget::Window(window_id) => { + let window = windows.get(*window_id)?; + RenderTargetInfo { + physical_size: UVec2::new(window.physical_width(), window.physical_height()), + scale_factor: window.scale_factor(), + } + } + RenderTarget::Image(image_handle) => { + let image = images.get(image_handle)?; let Extent3d { width, height, .. } = image.texture_descriptor.size; - Vec2::new(width as f32, height as f32) - }), - } + RenderTargetInfo { + physical_size: UVec2::new(width, height), + scale_factor: 1.0, + } + } + }) } // Check if this render target is contained in the given changed windows or images. fn is_changed( @@ -118,53 +322,6 @@ impl Default for DepthCalculation { } } -impl Camera { - /// Given a position in world space, use the camera to compute the screen space coordinates. - /// - /// To get the coordinates in Normalized Device Coordinates, you should use - /// [`world_to_ndc`](Self::world_to_ndc). 
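// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): using the renamed
// `world_to_viewport` (formerly `world_to_screen`). It no longer needs
// `Windows`/`Assets<Image>` because the target size comes from the camera's
// computed values. The system and the `Player` marker are hypothetical.
use bevy_ecs::prelude::*;
use bevy_transform::components::GlobalTransform;

#[derive(Component)]
struct Player;

fn report_player_viewport_position(
    cameras: Query<(&Camera, &GlobalTransform)>,
    players: Query<&GlobalTransform, With<Player>>,
) {
    for (camera, camera_transform) in cameras.iter() {
        for player_transform in players.iter() {
            // Returns None when the point is outside the camera frustum or the
            // camera has no render-target information yet.
            if let Some(viewport_pos) =
                camera.world_to_viewport(camera_transform, player_transform.translation)
            {
                bevy_utils::tracing::info!("player at viewport position {:?}", viewport_pos);
            }
        }
    }
}
// ---------------------------------------------------------------------------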
- pub fn world_to_screen( - &self, - windows: &Windows, - images: &Assets, - camera_transform: &GlobalTransform, - world_position: Vec3, - ) -> Option { - let window_size = self.target.get_logical_size(windows, images)?; - let ndc_space_coords = self.world_to_ndc(camera_transform, world_position)?; - // NDC z-values outside of 0 < z < 1 are outside the camera frustum and are thus not in screen space - if ndc_space_coords.z < 0.0 || ndc_space_coords.z > 1.0 { - return None; - } - - // Once in NDC space, we can discard the z element and rescale x/y to fit the screen - Some((ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * window_size) - } - - /// Given a position in world space, use the camera to compute the Normalized Device Coordinates. - /// - /// Values returned will be between -1.0 and 1.0 when the position is in screen space. - /// To get the coordinates in the render target dimensions, you should use - /// [`world_to_screen`](Self::world_to_screen). - pub fn world_to_ndc( - &self, - camera_transform: &GlobalTransform, - world_position: Vec3, - ) -> Option { - // Build a transform to convert from world to NDC using camera data - let world_to_ndc: Mat4 = - self.projection_matrix * camera_transform.compute_matrix().inverse(); - let ndc_space_coords: Vec3 = world_to_ndc.project_point3(world_position); - - if !ndc_space_coords.is_nan() { - Some(ndc_space_coords) - } else { - None - } - } -} - -#[allow(clippy::type_complexity)] pub fn camera_system( mut window_resized_events: EventReader, mut window_created_events: EventReader, @@ -219,125 +376,64 @@ pub fn camera_system( || added_cameras.contains(&entity) || camera_projection.is_changed() { - if let Some(size) = camera.target.get_logical_size(&windows, &images) { + camera.computed.target_info = camera.target.get_render_target_info(&windows, &images); + if let Some(size) = camera.logical_viewport_size() { camera_projection.update(size.x, size.y); - camera.projection_matrix = camera_projection.get_projection_matrix(); + camera.computed.projection_matrix = camera_projection.get_projection_matrix(); camera.depth_calculation = camera_projection.depth_calculation(); } } } } -pub struct CameraTypePlugin(PhantomData); - -impl Default for CameraTypePlugin { - fn default() -> Self { - Self(Default::default()) - } -} - -impl Plugin for CameraTypePlugin { - fn build(&self, app: &mut App) { - app.init_resource::>() - .add_startup_system_to_stage(StartupStage::PostStartup, set_active_camera::) - .add_system_to_stage(CoreStage::PostUpdate, set_active_camera::); - if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { - render_app.add_system_to_stage(RenderStage::Extract, extract_cameras::); - } - } -} - -/// The canonical source of the "active camera" of the given camera type `T`. -#[derive(Debug)] -pub struct ActiveCamera { - camera: Option, - marker: PhantomData, -} - -impl Default for ActiveCamera { - fn default() -> Self { - Self { - camera: Default::default(), - marker: Default::default(), - } - } -} - -impl Clone for ActiveCamera { - fn clone(&self) -> Self { - Self { - camera: self.camera, - marker: self.marker, - } - } -} - -impl ActiveCamera { - /// Sets the active camera to the given `camera` entity. - pub fn set(&mut self, camera: Entity) { - self.camera = Some(camera); - } - - /// Returns the active camera, if it exists. 
- pub fn get(&self) -> Option { - self.camera - } -} - -pub fn set_active_camera( - mut active_camera: ResMut>, - cameras: Query, With)>, -) { - // Check if there is already an active camera set and - // that it has not been deleted on the previous frame - if let Some(camera) = active_camera.get() { - if cameras.contains(camera) { - return; - } - } - - // If the previous active camera ceased to exist - // fallback to another camera of the same type T - if let Some(camera) = cameras.iter().next() { - active_camera.camera = Some(camera); - } else { - active_camera.camera = None; - } -} - #[derive(Component, Debug)] pub struct ExtractedCamera { pub target: RenderTarget, - pub physical_size: Option, + pub physical_viewport_size: Option, + pub physical_target_size: Option, + pub viewport: Option, + pub render_graph: Cow<'static, str>, + pub priority: isize, } -pub fn extract_cameras( +pub fn extract_cameras( mut commands: Commands, - windows: Res, - images: Res>, - active_camera: Res>, - query: Query<(&Camera, &GlobalTransform, &VisibleEntities), With>, + query: Query<( + Entity, + &Camera, + &CameraRenderGraph, + &GlobalTransform, + &VisibleEntities, + )>, ) { - if let Some(entity) = active_camera.get() { - if let Ok((camera, transform, visible_entities)) = query.get(entity) { - if let Some(size) = camera.target.get_physical_size(&windows, &images) { - commands.get_or_spawn(entity).insert_bundle(( - ExtractedCamera { - target: camera.target.clone(), - physical_size: camera.target.get_physical_size(&windows, &images), - }, - ExtractedView { - projection: camera.projection_matrix, - transform: *transform, - width: size.x, - height: size.y, - }, - visible_entities.clone(), - M::default(), - )); + for (entity, camera, camera_render_graph, transform, visible_entities) in query.iter() { + if !camera.is_active { + continue; + } + if let (Some(viewport_size), Some(target_size)) = ( + camera.physical_viewport_size(), + camera.physical_target_size(), + ) { + if target_size.x == 0 || target_size.y == 0 { + continue; } + commands.get_or_spawn(entity).insert_bundle(( + ExtractedCamera { + target: camera.target.clone(), + viewport: camera.viewport.clone(), + physical_viewport_size: Some(viewport_size), + physical_target_size: Some(target_size), + render_graph: camera_render_graph.0.clone(), + priority: camera.priority, + }, + ExtractedView { + projection: camera.projection_matrix(), + transform: *transform, + width: viewport_size.x, + height: viewport_size.y, + }, + visible_entities.clone(), + )); } } - - commands.insert_resource(active_camera.clone()) } diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs b/crates/bevy_render/src/camera/camera_driver_node.rs new file mode 100644 index 0000000000000..fab057dc2eb7b --- /dev/null +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -0,0 +1,109 @@ +use crate::{ + camera::{ExtractedCamera, RenderTarget}, + render_graph::{Node, NodeRunError, RenderGraphContext, SlotValue}, + renderer::RenderContext, + view::ExtractedWindows, +}; +use bevy_ecs::{entity::Entity, prelude::QueryState, world::World}; +use bevy_utils::{tracing::warn, HashSet}; +use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor}; + +pub struct CameraDriverNode { + cameras: QueryState<(Entity, &'static ExtractedCamera)>, +} + +impl CameraDriverNode { + pub fn new(world: &mut World) -> Self { + Self { + cameras: world.query(), + } + } +} + +impl Node for CameraDriverNode { + fn update(&mut self, world: &mut World) { + self.cameras.update_archetypes(world); 
+ } + fn run( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext, + world: &World, + ) -> Result<(), NodeRunError> { + let mut sorted_cameras = self + .cameras + .iter_manual(world) + .map(|(e, c)| (e, c.priority, c.target.clone())) + .collect::>(); + // sort by priority and ensure within a priority, RenderTargets of the same type are packed together + sorted_cameras.sort_by(|(_, p1, t1), (_, p2, t2)| match p1.cmp(p2) { + std::cmp::Ordering::Equal => t1.cmp(t2), + ord => ord, + }); + let mut camera_windows = HashSet::new(); + let mut previous_priority_target = None; + let mut ambiguities = HashSet::new(); + for (entity, priority, target) in sorted_cameras { + let new_priority_target = (priority, target); + if let Some(previous_priority_target) = previous_priority_target { + if previous_priority_target == new_priority_target { + ambiguities.insert(new_priority_target.clone()); + } + } + previous_priority_target = Some(new_priority_target); + if let Ok((_, camera)) = self.cameras.get_manual(world, entity) { + if let RenderTarget::Window(id) = camera.target { + camera_windows.insert(id); + } + graph + .run_sub_graph(camera.render_graph.clone(), vec![SlotValue::Entity(entity)])?; + } + } + + if !ambiguities.is_empty() { + warn!( + "Camera priority ambiguities detected for active cameras with the following priorities: {:?}. \ + To fix this, ensure there is exactly one Camera entity spawned with a given priority for a given RenderTarget. \ + Ambiguities should be resolved because either (1) multiple active cameras were spawned accidentally, which will \ + result in rendering multiple instances of the scene or (2) for cases where multiple active cameras is intentional, \ + ambiguities could result in unpredictable render results.", + ambiguities + ); + } + + // wgpu (and some backends) require doing work for swap chains if you call `get_current_texture()` and `present()` + // This ensures that Bevy doesn't crash, even when there are no cameras (and therefore no work submitted). 
+ for (id, window) in world.resource::().iter() { + if camera_windows.contains(id) { + continue; + } + + let swap_chain_texture = if let Some(swap_chain_texture) = &window.swap_chain_texture { + swap_chain_texture + } else { + continue; + }; + + #[cfg(feature = "trace")] + let _span = bevy_utils::tracing::info_span!("no_camera_clear_pass").entered(); + let pass_descriptor = RenderPassDescriptor { + label: Some("no_camera_clear_pass"), + color_attachments: &[RenderPassColorAttachment { + view: swap_chain_texture, + resolve_target: None, + ops: Operations { + load: LoadOp::Clear(wgpu::Color::BLACK), + store: true, + }, + }], + depth_stencil_attachment: None, + }; + + render_context + .command_encoder + .begin_render_pass(&pass_descriptor); + } + + Ok(()) + } +} diff --git a/crates/bevy_render/src/camera/mod.rs b/crates/bevy_render/src/camera/mod.rs index bf1d1d5d8c101..37acb0b895ea7 100644 --- a/crates/bevy_render/src/camera/mod.rs +++ b/crates/bevy_render/src/camera/mod.rs @@ -1,19 +1,19 @@ -mod bundle; #[allow(clippy::module_inception)] mod camera; +mod camera_driver_node; mod projection; -pub use bundle::*; pub use camera::*; +pub use camera_driver_node::*; pub use projection::*; use crate::{ primitives::Aabb, + render_graph::RenderGraph, view::{ComputedVisibility, Visibility, VisibleEntities}, + RenderApp, RenderStage, }; -use bevy_app::{App, CoreStage, Plugin}; -use bevy_ecs::schedule::ParallelSystemDescriptorCoercion; -use bevy_window::ModifiesWindows; +use bevy_app::{App, Plugin}; #[derive(Default)] pub struct CameraPlugin; @@ -23,24 +23,22 @@ impl Plugin for CameraPlugin { app.register_type::() .register_type::() .register_type::() - .register_type::() - .register_type::() .register_type::() .register_type::() .register_type::() .register_type::() .register_type::() - .register_type::() - .register_type::() - .add_system_to_stage( - CoreStage::PostUpdate, - crate::camera::camera_system::.after(ModifiesWindows), - ) - .add_system_to_stage( - CoreStage::PostUpdate, - crate::camera::camera_system::.after(ModifiesWindows), - ) - .add_plugin(CameraTypePlugin::::default()) - .add_plugin(CameraTypePlugin::::default()); + .register_type::() + .add_plugin(CameraProjectionPlugin::::default()) + .add_plugin(CameraProjectionPlugin::::default()) + .add_plugin(CameraProjectionPlugin::::default()); + + if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { + render_app.add_system_to_stage(RenderStage::Extract, extract_cameras); + + let camera_driver_node = CameraDriverNode::new(&mut render_app.world); + let mut render_graph = render_app.world.resource_mut::(); + render_graph.add_node(crate::main_graph::node::CAMERA_DRIVER, camera_driver_node); + } } } diff --git a/crates/bevy_render/src/camera/projection.rs b/crates/bevy_render/src/camera/projection.rs index 3f983fa8b3330..aaf0e761bba37 100644 --- a/crates/bevy_render/src/camera/projection.rs +++ b/crates/bevy_render/src/camera/projection.rs @@ -1,10 +1,43 @@ +use std::marker::PhantomData; + use super::DepthCalculation; -use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_app::{App, CoreStage, Plugin, StartupStage}; +use bevy_ecs::{prelude::*, reflect::ReflectComponent}; use bevy_math::Mat4; -use bevy_reflect::std_traits::ReflectDefault; -use bevy_reflect::{Reflect, ReflectDeserialize}; +use bevy_reflect::{ + std_traits::ReflectDefault, GetTypeRegistration, Reflect, ReflectDeserialize, ReflectSerialize, +}; +use bevy_window::ModifiesWindows; use serde::{Deserialize, Serialize}; +/// Adds [`Camera`](crate::camera::Camera) 
driver systems for a given projection type. +pub struct CameraProjectionPlugin(PhantomData); + +impl Default for CameraProjectionPlugin { + fn default() -> Self { + Self(Default::default()) + } +} + +#[derive(SystemLabel, Clone, Eq, PartialEq, Hash, Debug)] +pub struct CameraUpdateSystem; + +impl Plugin for CameraProjectionPlugin { + fn build(&self, app: &mut App) { + app.register_type::() + .add_startup_system_to_stage( + StartupStage::PostStartup, + crate::camera::camera_system::, + ) + .add_system_to_stage( + CoreStage::PostUpdate, + crate::camera::camera_system:: + .label(CameraUpdateSystem) + .after(ModifiesWindows), + ); + } +} + pub trait CameraProjection { fn get_projection_matrix(&self) -> Mat4; fn update(&mut self, width: f32, height: f32); @@ -12,6 +45,62 @@ pub trait CameraProjection { fn far(&self) -> f32; } +/// A configurable [`CameraProjection`] that can select its projection type at runtime. +#[derive(Component, Debug, Clone, Reflect)] +#[reflect(Component, Default)] +pub enum Projection { + Perspective(PerspectiveProjection), + Orthographic(OrthographicProjection), +} + +impl From for Projection { + fn from(p: PerspectiveProjection) -> Self { + Self::Perspective(p) + } +} + +impl From for Projection { + fn from(p: OrthographicProjection) -> Self { + Self::Orthographic(p) + } +} + +impl CameraProjection for Projection { + fn get_projection_matrix(&self) -> Mat4 { + match self { + Projection::Perspective(projection) => projection.get_projection_matrix(), + Projection::Orthographic(projection) => projection.get_projection_matrix(), + } + } + + fn update(&mut self, width: f32, height: f32) { + match self { + Projection::Perspective(projection) => projection.update(width, height), + Projection::Orthographic(projection) => projection.update(width, height), + } + } + + fn depth_calculation(&self) -> DepthCalculation { + match self { + Projection::Perspective(projection) => projection.depth_calculation(), + Projection::Orthographic(projection) => projection.depth_calculation(), + } + } + + fn far(&self) -> f32 { + match self { + Projection::Perspective(projection) => projection.far(), + Projection::Orthographic(projection) => projection.far(), + } + } +} + +impl Default for Projection { + fn default() -> Self { + Projection::Perspective(Default::default()) + } +} + #[derive(Component, Debug, Clone, Reflect)] #[reflect(Component, Default)] pub struct PerspectiveProjection { @@ -66,10 +155,15 @@ pub enum ScalingMode { None, /// Match the window size. 1 world unit = 1 pixel. WindowSize, + /// Use minimal possible viewport size while keeping the aspect ratio. + /// Arguments are in world units. + Auto { min_width: f32, min_height: f32 }, /// Keep vertical axis constant; resize horizontal with aspect ratio. - FixedVertical, + /// The argument is the desired height of the viewport in world units. + FixedVertical(f32), /// Keep horizontal axis constant; resize vertical with aspect ratio. - FixedHorizontal, + /// The argument is the desired width of the viewport in world units. 
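// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): the scaling modes now carry
// their target size in world units, and the new `Projection` enum lets a
// camera pick its projection kind at runtime. An orthographic projection that
// always shows 10 world units vertically:
fn ten_units_tall() -> Projection {
    OrthographicProjection {
        // Keep 10 world units visible on the vertical axis; the horizontal
        // extent follows the aspect ratio.
        scaling_mode: ScalingMode::FixedVertical(10.0),
        ..Default::default()
    }
    .into() // via the From<OrthographicProjection> impl above
}
// ---------------------------------------------------------------------------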
+ FixedHorizontal(f32), } #[derive(Component, Debug, Clone, Reflect)] @@ -102,50 +196,51 @@ impl CameraProjection for OrthographicProjection { } fn update(&mut self, width: f32, height: f32) { - match (&self.scaling_mode, &self.window_origin) { - (ScalingMode::WindowSize, WindowOrigin::Center) => { - let half_width = width / 2.0; - let half_height = height / 2.0; - self.left = -half_width; - self.right = half_width; - self.top = half_height; - self.bottom = -half_height; + let (viewport_width, viewport_height) = match self.scaling_mode { + ScalingMode::WindowSize => (width, height), + ScalingMode::Auto { + min_width, + min_height, + } => { + if width * min_height > min_width * height { + (width * min_height / height, min_height) + } else { + (min_width, height * min_width / width) + } } - (ScalingMode::WindowSize, WindowOrigin::BottomLeft) => { - self.left = 0.0; - self.right = width; - self.top = height; - self.bottom = 0.0; - } - (ScalingMode::FixedVertical, WindowOrigin::Center) => { - let aspect_ratio = width / height; - self.left = -aspect_ratio; - self.right = aspect_ratio; - self.top = 1.0; - self.bottom = -1.0; + ScalingMode::FixedVertical(viewport_height) => { + (width * viewport_height / height, viewport_height) } - (ScalingMode::FixedVertical, WindowOrigin::BottomLeft) => { - let aspect_ratio = width / height; - self.left = 0.0; - self.right = aspect_ratio; - self.top = 1.0; - self.bottom = 0.0; + ScalingMode::FixedHorizontal(viewport_width) => { + (viewport_width, height * viewport_width / width) } - (ScalingMode::FixedHorizontal, WindowOrigin::Center) => { - let aspect_ratio = height / width; - self.left = -1.0; - self.right = 1.0; - self.top = aspect_ratio; - self.bottom = -aspect_ratio; + ScalingMode::None => return, + }; + + match self.window_origin { + WindowOrigin::Center => { + let half_width = viewport_width / 2.0; + let half_height = viewport_height / 2.0; + self.left = -half_width; + self.bottom = -half_height; + self.right = half_width; + self.top = half_height; + + if let ScalingMode::WindowSize = self.scaling_mode { + if self.scale == 1.0 { + self.left = self.left.floor(); + self.bottom = self.bottom.floor(); + self.right = self.right.floor(); + self.top = self.top.floor(); + } + } } - (ScalingMode::FixedHorizontal, WindowOrigin::BottomLeft) => { - let aspect_ratio = height / width; + WindowOrigin::BottomLeft => { self.left = 0.0; - self.right = 1.0; - self.top = aspect_ratio; self.bottom = 0.0; + self.right = viewport_width; + self.top = viewport_height; } - (ScalingMode::None, _) => {} } } diff --git a/crates/bevy_render/src/color/mod.rs b/crates/bevy_render/src/color/mod.rs index b1aaa66079a9a..b249cb1887bce 100644 --- a/crates/bevy_render/src/color/mod.rs +++ b/crates/bevy_render/src/color/mod.rs @@ -4,9 +4,10 @@ pub use colorspace::*; use crate::color::{HslRepresentation, SrgbColorSpace}; use bevy_math::{Vec3, Vec4}; -use bevy_reflect::{FromReflect, Reflect, ReflectDeserialize}; +use bevy_reflect::{FromReflect, Reflect, ReflectDeserialize, ReflectSerialize}; use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign, Mul, MulAssign}; +use thiserror::Error; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Reflect, FromReflect)] #[reflect(PartialEq, Serialize, Deserialize)] @@ -491,8 +492,8 @@ impl Color { } } - /// Converts a `Color` to a `[f32; 4]` from HLS colorspace - pub fn as_hlsa_f32(self: Color) -> [f32; 4] { + /// Converts a `Color` to a `[f32; 4]` from HSL colorspace + pub fn as_hsla_f32(self: Color) -> [f32; 4] { match self { 
Color::Rgba { red, @@ -834,12 +835,8 @@ impl MulAssign for Color { match self { Color::Rgba { red, green, blue, .. - } => { - *red *= rhs; - *green *= rhs; - *blue *= rhs; } - Color::RgbaLinear { + | Color::RgbaLinear { red, green, blue, .. } => { *red *= rhs; @@ -910,13 +907,8 @@ impl MulAssign for Color { green, blue, alpha, - } => { - *red *= rhs.x; - *green *= rhs.y; - *blue *= rhs.z; - *alpha *= rhs.w; } - Color::RgbaLinear { + | Color::RgbaLinear { red, green, blue, @@ -989,12 +981,8 @@ impl MulAssign for Color { match self { Color::Rgba { red, green, blue, .. - } => { - *red *= rhs.x; - *green *= rhs.y; - *blue *= rhs.z; } - Color::RgbaLinear { + | Color::RgbaLinear { red, green, blue, .. } => { *red *= rhs.x; @@ -1065,13 +1053,8 @@ impl MulAssign<[f32; 4]> for Color { green, blue, alpha, - } => { - *red *= rhs[0]; - *green *= rhs[1]; - *blue *= rhs[2]; - *alpha *= rhs[3]; } - Color::RgbaLinear { + | Color::RgbaLinear { red, green, blue, @@ -1144,12 +1127,8 @@ impl MulAssign<[f32; 3]> for Color { match self { Color::Rgba { red, green, blue, .. - } => { - *red *= rhs[0]; - *green *= rhs[1]; - *blue *= rhs[2]; } - Color::RgbaLinear { + | Color::RgbaLinear { red, green, blue, .. } => { *red *= rhs[0]; @@ -1170,10 +1149,12 @@ impl MulAssign<[f32; 3]> for Color { } } -#[derive(Debug)] +#[derive(Debug, Error)] pub enum HexColorError { + #[error("Unexpected length of hex string")] Length, - Hex(hex::FromHexError), + #[error("Error parsing hex value")] + Hex(#[from] hex::FromHexError), } fn decode_rgb(data: &[u8]) -> Result { diff --git a/crates/bevy_render/src/render_component.rs b/crates/bevy_render/src/extract_component.rs similarity index 90% rename from crates/bevy_render/src/render_component.rs rename to crates/bevy_render/src/extract_component.rs index 7cd3158cb94d7..673b23c30770e 100644 --- a/crates/bevy_render/src/render_component.rs +++ b/crates/bevy_render/src/extract_component.rs @@ -1,5 +1,5 @@ use crate::{ - render_resource::{std140::AsStd140, DynamicUniformVec}, + render_resource::{encase::internal::WriteInto, DynamicUniformBuffer, ShaderType}, renderer::{RenderDevice, RenderQueue}, view::ComputedVisibility, RenderApp, RenderStage, @@ -58,7 +58,7 @@ impl Default for UniformComponentPlugin { } } -impl Plugin for UniformComponentPlugin { +impl Plugin for UniformComponentPlugin { fn build(&self, app: &mut App) { if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { render_app @@ -69,12 +69,12 @@ impl Plugin for UniformComponentPlugin { } /// Stores all uniforms of the component type. 
-pub struct ComponentUniforms { - uniforms: DynamicUniformVec, +pub struct ComponentUniforms { + uniforms: DynamicUniformBuffer, } -impl Deref for ComponentUniforms { - type Target = DynamicUniformVec; +impl Deref for ComponentUniforms { + type Target = DynamicUniformBuffer; #[inline] fn deref(&self) -> &Self::Target { @@ -82,14 +82,14 @@ impl Deref for ComponentUniforms { } } -impl ComponentUniforms { +impl ComponentUniforms { #[inline] - pub fn uniforms(&self) -> &DynamicUniformVec { + pub fn uniforms(&self) -> &DynamicUniformBuffer { &self.uniforms } } -impl Default for ComponentUniforms { +impl Default for ComponentUniforms { fn default() -> Self { Self { uniforms: Default::default(), @@ -106,7 +106,7 @@ fn prepare_uniform_components( mut component_uniforms: ResMut>, components: Query<(Entity, &C)>, ) where - C: AsStd140 + Clone, + C: ShaderType + WriteInto + Clone, { component_uniforms.uniforms.clear(); let entities = components diff --git a/crates/bevy_render/src/extract_resource.rs b/crates/bevy_render/src/extract_resource.rs new file mode 100644 index 0000000000000..5db3be904b263 --- /dev/null +++ b/crates/bevy_render/src/extract_resource.rs @@ -0,0 +1,46 @@ +use std::marker::PhantomData; + +use bevy_app::{App, Plugin}; +use bevy_ecs::system::{Commands, Res, Resource}; +pub use bevy_render_macros::ExtractResource; + +use crate::{RenderApp, RenderStage}; + +/// Describes how a resource gets extracted for rendering. +/// +/// Therefore the resource is transferred from the "main world" into the "render world" +/// in the [`RenderStage::Extract`](crate::RenderStage::Extract) step. +pub trait ExtractResource: Resource { + type Source: Resource; + + /// Defines how the resource is transferred into the "render world". + fn extract_resource(source: &Self::Source) -> Self; +} + +/// This plugin extracts the resources into the "render world". +/// +/// Therefore it sets up the [`RenderStage::Extract`](crate::RenderStage::Extract) step +/// for the specified [`Resource`]. +pub struct ExtractResourcePlugin(PhantomData); + +impl Default for ExtractResourcePlugin { + fn default() -> Self { + Self(PhantomData) + } +} + +impl Plugin for ExtractResourcePlugin { + fn build(&self, app: &mut App) { + if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { + render_app.add_system_to_stage(RenderStage::Extract, extract_resource::); + } + } +} + +/// This system extracts the resource of the corresponding [`Resource`] type +/// by cloning it. 
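// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): mirroring a main-world resource
// into the render world with the new `ExtractResource` trait and
// `ExtractResourcePlugin`. `FogSettings` is a hypothetical resource used only
// for this example.
use crate::color::Color;

#[derive(Clone)]
pub struct FogSettings {
    pub color: Color,
}

impl ExtractResource for FogSettings {
    type Source = FogSettings;

    // Extraction runs in RenderStage::Extract; a plain clone is enough here.
    fn extract_resource(source: &Self::Source) -> Self {
        source.clone()
    }
}

// In app setup (assuming a render sub-app is present):
// app.insert_resource(FogSettings { color: Color::rgb(0.5, 0.5, 0.5) })
//     .add_plugin(ExtractResourcePlugin::<FogSettings>::default());
// ---------------------------------------------------------------------------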
+pub fn extract_resource(mut commands: Commands, resource: Res) { + if resource.is_changed() { + commands.insert_resource(R::extract_resource(resource.into_inner())); + } +} diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index cb0d4b49eeff4..0caa1a75bc29c 100644 --- a/crates/bevy_render/src/lib.rs +++ b/crates/bevy_render/src/lib.rs @@ -2,10 +2,11 @@ extern crate core; pub mod camera; pub mod color; +pub mod extract_component; +pub mod extract_resource; pub mod mesh; pub mod primitives; pub mod render_asset; -pub mod render_component; pub mod render_graph; pub mod render_phase; pub mod render_resource; @@ -17,10 +18,7 @@ pub mod view; pub mod prelude { #[doc(hidden)] pub use crate::{ - camera::{ - Camera, OrthographicCameraBundle, OrthographicProjection, PerspectiveCameraBundle, - PerspectiveProjection, - }, + camera::{Camera, OrthographicProjection, PerspectiveProjection}, color::Color, mesh::{shape, Mesh}, render_resource::Shader, @@ -29,7 +27,6 @@ pub mod prelude { }; } -use bevy_utils::tracing::debug; pub use once_cell; use crate::{ @@ -46,6 +43,7 @@ use crate::{ use bevy_app::{App, AppLabel, Plugin}; use bevy_asset::{AddAsset, AssetServer}; use bevy_ecs::prelude::*; +use bevy_utils::tracing::debug; use std::ops::{Deref, DerefMut}; /// Contains the default Bevy rendering backend based on wgpu. @@ -98,6 +96,12 @@ impl DerefMut for RenderWorld { } } +pub mod main_graph { + pub mod node { + pub const CAMERA_DRIVER: &str = "camera_driver"; + } +} + /// A Label for the rendering sub-app. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, AppLabel)] pub struct RenderApp; @@ -170,6 +174,7 @@ impl Plugin for RenderPlugin { .with_system(render_system.exclusive_system().at_end()), ) .add_stage(RenderStage::Cleanup, SystemStage::parallel()) + .init_resource::() .insert_resource(instance) .insert_resource(device) .insert_resource(queue) diff --git a/crates/bevy_render/src/mesh/mesh/conversions.rs b/crates/bevy_render/src/mesh/mesh/conversions.rs index 627038943224d..cb4c85db89214 100644 --- a/crates/bevy_render/src/mesh/mesh/conversions.rs +++ b/crates/bevy_render/src/mesh/mesh/conversions.rs @@ -134,8 +134,9 @@ impl TryFrom for Vec<[u8; 4]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Uint8x4(value) => Ok(value), - VertexAttributeValues::Unorm8x4(value) => Ok(value), + VertexAttributeValues::Uint8x4(value) | VertexAttributeValues::Unorm8x4(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -146,8 +147,9 @@ impl TryFrom for Vec<[i8; 4]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Sint8x4(value) => Ok(value), - VertexAttributeValues::Snorm8x4(value) => Ok(value), + VertexAttributeValues::Sint8x4(value) | VertexAttributeValues::Snorm8x4(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -158,8 +160,9 @@ impl TryFrom for Vec<[u8; 2]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Uint8x2(value) => Ok(value), - VertexAttributeValues::Unorm8x2(value) => Ok(value), + VertexAttributeValues::Uint8x2(value) | VertexAttributeValues::Unorm8x2(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -170,8 +173,9 @@ impl TryFrom for Vec<[i8; 2]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Sint8x2(value) => Ok(value), - VertexAttributeValues::Snorm8x2(value) => Ok(value), + 
VertexAttributeValues::Sint8x2(value) | VertexAttributeValues::Snorm8x2(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -182,8 +186,9 @@ impl TryFrom for Vec<[i16; 4]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Sint16x4(value) => Ok(value), - VertexAttributeValues::Snorm16x4(value) => Ok(value), + VertexAttributeValues::Sint16x4(value) | VertexAttributeValues::Snorm16x4(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -194,8 +199,9 @@ impl TryFrom for Vec<[u16; 4]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Uint16x4(value) => Ok(value), - VertexAttributeValues::Unorm16x4(value) => Ok(value), + VertexAttributeValues::Uint16x4(value) | VertexAttributeValues::Unorm16x4(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -206,8 +212,9 @@ impl TryFrom for Vec<[u16; 2]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Uint16x2(value) => Ok(value), - VertexAttributeValues::Unorm16x2(value) => Ok(value), + VertexAttributeValues::Uint16x2(value) | VertexAttributeValues::Unorm16x2(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } @@ -218,8 +225,9 @@ impl TryFrom for Vec<[i16; 2]> { fn try_from(value: VertexAttributeValues) -> Result { match value { - VertexAttributeValues::Sint16x2(value) => Ok(value), - VertexAttributeValues::Snorm16x2(value) => Ok(value), + VertexAttributeValues::Sint16x2(value) | VertexAttributeValues::Snorm16x2(value) => { + Ok(value) + } _ => Err(FromVertexAttributeError::new::(value)), } } diff --git a/crates/bevy_render/src/mesh/mesh/mod.rs b/crates/bevy_render/src/mesh/mesh/mod.rs index 12623a27be54a..3193ebb5d1a09 100644 --- a/crates/bevy_render/src/mesh/mesh/mod.rs +++ b/crates/bevy_render/src/mesh/mesh/mod.rs @@ -14,7 +14,7 @@ use bevy_ecs::system::{lifetimeless::SRes, SystemParamItem}; use bevy_math::*; use bevy_reflect::TypeUuid; use bevy_utils::Hashed; -use std::{collections::BTreeMap, hash::Hash}; +use std::{collections::BTreeMap, hash::Hash, iter::FusedIterator}; use thiserror::Error; use wgpu::{ util::BufferInitDescriptor, BufferUsages, IndexFormat, VertexAttribute, VertexFormat, @@ -141,6 +141,22 @@ impl Mesh { .map(|data| &mut data.values) } + /// Returns an iterator that yields references to the data of each vertex attribute. + pub fn attributes( + &self, + ) -> impl Iterator { + self.attributes.iter().map(|(id, data)| (*id, &data.values)) + } + + /// Returns an iterator that yields mutable references to the data of each vertex attribute. + pub fn attributes_mut( + &mut self, + ) -> impl Iterator { + self.attributes + .iter_mut() + .map(|(id, data)| (*id, &mut data.values)) + } + /// Sets the vertex indices of the mesh. They describe how triangles are constructed out of the /// vertex attributes and are therefore only useful for the [`PrimitiveTopology`] variants /// that use triangles. @@ -203,7 +219,7 @@ impl Mesh { /// Panics if the attributes have different vertex counts. 
pub fn count_vertices(&self) -> usize { let mut vertex_count: Option = None; - for (attribute_id, attribute_data) in self.attributes.iter() { + for (attribute_id, attribute_data) in &self.attributes { let attribute_len = attribute_data.values.len(); if let Some(previous_vertex_count) = vertex_count { assert_eq!(previous_vertex_count, attribute_len, @@ -253,6 +269,7 @@ impl Mesh { /// /// This can dramatically increase the vertex count, so make sure this is what you want. /// Does nothing if no [Indices] are set. + #[allow(clippy::match_same_arms)] pub fn duplicate_vertices(&mut self) { fn duplicate(values: &[T], indices: impl Iterator) -> Vec { indices.map(|i| values[i]).collect() @@ -327,6 +344,16 @@ impl Mesh { self.insert_attribute(Mesh::ATTRIBUTE_NORMAL, normals); } + /// Generate tangents for the mesh using the `mikktspace` algorithm. + /// + /// Sets the [`Mesh::ATTRIBUTE_TANGENT`] attribute if successful. + /// Requires a [`PrimitiveTopology::TriangleList`] topology and the [`Mesh::ATTRIBUTE_POSITION`], [`Mesh::ATTRIBUTE_NORMAL`] and [`Mesh::ATTRIBUTE_UV_0`] attributes set. + pub fn generate_tangents(&mut self) -> Result<(), GenerateTangentsError> { + let tangents = generate_tangents_for_mesh(self)?; + self.insert_attribute(Mesh::ATTRIBUTE_TANGENT, tangents); + Ok(()) + } + /// Compute the Axis-Aligned Bounding Box of the mesh vertices in model space pub fn compute_aabb(&self) -> Option { if let Some(VertexAttributeValues::Float32x3(values)) = @@ -430,7 +457,7 @@ impl InnerMeshVertexBufferLayout { format: layout_attribute.format, offset: layout_attribute.offset, shader_location: attribute_descriptor.shader_location, - }) + }); } else { return Err(MissingVertexAttributeError { id: attribute_descriptor.id, @@ -491,6 +518,7 @@ pub trait VertexFormatSize { } impl VertexFormatSize for wgpu::VertexFormat { + #[allow(clippy::match_same_arms)] fn get_size(self) -> u64 { match self { VertexFormat::Uint8x2 => 2, @@ -568,6 +596,7 @@ pub enum VertexAttributeValues { impl VertexAttributeValues { /// Returns the number of vertices in this [`VertexAttributeValues`]. For a single /// mesh, all of the [`VertexAttributeValues`] must have the same length. + #[allow(clippy::match_same_arms)] pub fn len(&self) -> usize { match *self { VertexAttributeValues::Float32(ref values) => values.len(), @@ -617,6 +646,7 @@ impl VertexAttributeValues { // TODO: add vertex format as parameter here and perform type conversions /// Flattens the [`VertexAttributeValues`] into a sequence of bytes. This is /// useful for serialization and sending to the GPU. 
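// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): using the new
// `Mesh::attributes`/`attributes_mut` iterators and `Mesh::generate_tangents`.
// Assumes `mesh` is a triangle-list mesh that already has positions, normals,
// UVs, and indices.
fn prepare_mesh(mesh: &mut Mesh) {
    // Visit every vertex attribute without naming each one explicitly.
    for (_id, values) in mesh.attributes() {
        let _value_count = values.len();
    }

    // Tangent generation can fail (wrong topology, missing attributes or
    // indices), so surface the error instead of unwrapping.
    if let Err(err) = mesh.generate_tangents() {
        bevy_utils::tracing::warn!("failed to generate tangents: {}", err);
    }
}
// ---------------------------------------------------------------------------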
+ #[allow(clippy::match_same_arms)] pub fn get_bytes(&self) -> &[u8] { match self { VertexAttributeValues::Float32(values) => cast_slice(&values[..]), @@ -735,8 +765,18 @@ impl Iterator for IndicesIter<'_> { IndicesIter::U32(iter) => iter.next().map(|val| *val as usize), } } + + fn size_hint(&self) -> (usize, Option) { + match self { + IndicesIter::U16(iter) => iter.size_hint(), + IndicesIter::U32(iter) => iter.size_hint(), + } + } } +impl<'a> ExactSizeIterator for IndicesIter<'a> {} +impl<'a> FusedIterator for IndicesIter<'a> {} + impl From<&Indices> for IndexFormat { fn from(indices: &Indices) -> Self { match indices { @@ -818,3 +858,129 @@ impl RenderAsset for Mesh { }) } } + +struct MikktspaceGeometryHelper<'a> { + indices: &'a Indices, + positions: &'a Vec<[f32; 3]>, + normals: &'a Vec<[f32; 3]>, + uvs: &'a Vec<[f32; 2]>, + tangents: Vec<[f32; 4]>, +} + +impl MikktspaceGeometryHelper<'_> { + fn index(&self, face: usize, vert: usize) -> usize { + let index_index = face * 3 + vert; + + match self.indices { + Indices::U16(indices) => indices[index_index] as usize, + Indices::U32(indices) => indices[index_index] as usize, + } + } +} + +impl bevy_mikktspace::Geometry for MikktspaceGeometryHelper<'_> { + fn num_faces(&self) -> usize { + self.indices.len() / 3 + } + + fn num_vertices_of_face(&self, _: usize) -> usize { + 3 + } + + fn position(&self, face: usize, vert: usize) -> [f32; 3] { + self.positions[self.index(face, vert)] + } + + fn normal(&self, face: usize, vert: usize) -> [f32; 3] { + self.normals[self.index(face, vert)] + } + + fn tex_coord(&self, face: usize, vert: usize) -> [f32; 2] { + self.uvs[self.index(face, vert)] + } + + fn set_tangent_encoded(&mut self, tangent: [f32; 4], face: usize, vert: usize) { + let idx = self.index(face, vert); + self.tangents[idx] = tangent; + } +} + +#[derive(thiserror::Error, Debug)] +/// Failed to generate tangents for the mesh. +pub enum GenerateTangentsError { + #[error("cannot generate tangents for {0:?}")] + UnsupportedTopology(PrimitiveTopology), + #[error("missing indices")] + MissingIndices, + #[error("missing vertex attributes '{0}'")] + MissingVertexAttribute(&'static str), + #[error("the '{0}' vertex attribute should have {1:?} format")] + InvalidVertexAttributeFormat(&'static str, VertexFormat), + #[error("mesh not suitable for tangent generation")] + MikktspaceError, +} + +fn generate_tangents_for_mesh(mesh: &Mesh) -> Result, GenerateTangentsError> { + match mesh.primitive_topology() { + PrimitiveTopology::TriangleList => {} + other => return Err(GenerateTangentsError::UnsupportedTopology(other)), + }; + + let positions = match mesh.attribute(Mesh::ATTRIBUTE_POSITION).ok_or( + GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_POSITION.name), + )? { + VertexAttributeValues::Float32x3(vertices) => vertices, + _ => { + return Err(GenerateTangentsError::InvalidVertexAttributeFormat( + Mesh::ATTRIBUTE_POSITION.name, + VertexFormat::Float32x3, + )) + } + }; + let normals = match mesh.attribute(Mesh::ATTRIBUTE_NORMAL).ok_or( + GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_NORMAL.name), + )? { + VertexAttributeValues::Float32x3(vertices) => vertices, + _ => { + return Err(GenerateTangentsError::InvalidVertexAttributeFormat( + Mesh::ATTRIBUTE_NORMAL.name, + VertexFormat::Float32x3, + )) + } + }; + let uvs = match mesh.attribute(Mesh::ATTRIBUTE_UV_0).ok_or( + GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_UV_0.name), + )? 
{ + VertexAttributeValues::Float32x2(vertices) => vertices, + _ => { + return Err(GenerateTangentsError::InvalidVertexAttributeFormat( + Mesh::ATTRIBUTE_UV_0.name, + VertexFormat::Float32x2, + )) + } + }; + let indices = mesh + .indices() + .ok_or(GenerateTangentsError::MissingIndices)?; + + let len = positions.len(); + let tangents = vec![[0., 0., 0., 0.]; len]; + let mut mikktspace_mesh = MikktspaceGeometryHelper { + indices, + positions, + normals, + uvs, + tangents, + }; + let success = bevy_mikktspace::generate_tangents(&mut mikktspace_mesh); + if !success { + return Err(GenerateTangentsError::MikktspaceError); + } + + // mikktspace seems to assume left-handedness so we can flip the sign to correct for this + for tangent in &mut mikktspace_mesh.tangents { + tangent[3] = -tangent[3]; + } + + Ok(mikktspace_mesh.tangents) +} diff --git a/crates/bevy_render/src/render_asset.rs b/crates/bevy_render/src/render_asset.rs index 3ee311ffda79e..22936f3e677be 100644 --- a/crates/bevy_render/src/render_asset.rs +++ b/crates/bevy_render/src/render_asset.rs @@ -135,10 +135,7 @@ fn extract_render_asset( let mut removed = Vec::new(); for event in events.iter() { match event { - AssetEvent::Created { handle } => { - changed_assets.insert(handle); - } - AssetEvent::Modified { handle } => { + AssetEvent::Created { handle } | AssetEvent::Modified { handle } => { changed_assets.insert(handle); } AssetEvent::Removed { handle } => { diff --git a/crates/bevy_render/src/render_graph/edge.rs b/crates/bevy_render/src/render_graph/edge.rs index 0b3fe5432e079..88bfe24f9c913 100644 --- a/crates/bevy_render/src/render_graph/edge.rs +++ b/crates/bevy_render/src/render_graph/edge.rs @@ -35,16 +35,14 @@ impl Edge { /// Returns the id of the `input_node`. pub fn get_input_node(&self) -> NodeId { match self { - Edge::SlotEdge { input_node, .. } => *input_node, - Edge::NodeEdge { input_node, .. } => *input_node, + Edge::SlotEdge { input_node, .. } | Edge::NodeEdge { input_node, .. } => *input_node, } } /// Returns the id of the `output_node`. pub fn get_output_node(&self) -> NodeId { match self { - Edge::SlotEdge { output_node, .. } => *output_node, - Edge::NodeEdge { output_node, .. } => *output_node, + Edge::SlotEdge { output_node, .. } | Edge::NodeEdge { output_node, .. } => *output_node, } } } diff --git a/crates/bevy_render/src/render_graph/graph.rs b/crates/bevy_render/src/render_graph/graph.rs index 145e6cd917f88..c93e513cfcafd 100644 --- a/crates/bevy_render/src/render_graph/graph.rs +++ b/crates/bevy_render/src/render_graph/graph.rs @@ -114,17 +114,8 @@ impl RenderGraph { // node, we don't need to remove its input edges for input_edge in node_state.edges.input_edges().iter() { match input_edge { - Edge::SlotEdge { - output_node, - output_index: _, - input_node: _, - input_index: _, - } => { - if let Ok(output_node) = self.get_node_state_mut(*output_node) { - output_node.edges.remove_output_edge(input_edge.clone())?; - } - } - Edge::NodeEdge { + Edge::SlotEdge { output_node, .. 
} + | Edge::NodeEdge { input_node: _, output_node, } => { @@ -143,12 +134,8 @@ impl RenderGraph { output_index: _, input_node, input_index: _, - } => { - if let Ok(input_node) = self.get_node_state_mut(*input_node) { - input_node.edges.remove_input_edge(output_edge.clone())?; - } } - Edge::NodeEdge { + | Edge::NodeEdge { output_node: _, input_node, } => { diff --git a/crates/bevy_render/src/render_graph/node.rs b/crates/bevy_render/src/render_graph/node.rs index 5b806ad31b1e5..ab60925e6fbe1 100644 --- a/crates/bevy_render/src/render_graph/node.rs +++ b/crates/bevy_render/src/render_graph/node.rs @@ -1,7 +1,7 @@ use crate::{ render_graph::{ Edge, InputSlotError, OutputSlotError, RenderGraphContext, RenderGraphError, - RunSubGraphError, SlotInfo, SlotInfos, + RunSubGraphError, SlotInfo, SlotInfos, SlotType, SlotValue, }, renderer::RenderContext, }; @@ -331,3 +331,37 @@ impl Node for EmptyNode { Ok(()) } } + +/// A [`RenderGraph`](super::RenderGraph) [`Node`] that takes a view entity as input and runs the configured graph name once. +/// This makes it easier to insert sub-graph runs into a graph. +pub struct RunGraphOnViewNode { + graph_name: Cow<'static, str>, +} + +impl RunGraphOnViewNode { + pub const IN_VIEW: &'static str = "view"; + pub fn new>>(graph_name: T) -> Self { + Self { + graph_name: graph_name.into(), + } + } +} + +impl Node for RunGraphOnViewNode { + fn input(&self) -> Vec { + vec![SlotInfo::new(Self::IN_VIEW, SlotType::Entity)] + } + fn run( + &self, + graph: &mut RenderGraphContext, + _render_context: &mut RenderContext, + _world: &World, + ) -> Result<(), NodeRunError> { + let view_entity = graph.get_input_entity(Self::IN_VIEW)?; + graph.run_sub_graph( + self.graph_name.clone(), + vec![SlotValue::Entity(view_entity)], + )?; + Ok(()) + } +} diff --git a/crates/bevy_render/src/render_phase/draw_state.rs b/crates/bevy_render/src/render_phase/draw_state.rs index 81a5f9590d3fc..a709c078f921d 100644 --- a/crates/bevy_render/src/render_phase/draw_state.rs +++ b/crates/bevy_render/src/render_phase/draw_state.rs @@ -1,4 +1,5 @@ use crate::{ + camera::Viewport, prelude::Color, render_resource::{ BindGroup, BindGroupId, Buffer, BufferId, BufferSlice, RenderPipeline, RenderPipelineId, @@ -135,14 +136,14 @@ impl<'a> TrackedRenderPass<'a> { dynamic_uniform_indices ); return; - } else { - trace!( - "set bind_group {}: {:?} ({:?})", - index, - bind_group, - dynamic_uniform_indices - ); } + trace!( + "set bind_group {}: {:?} ({:?})", + index, + bind_group, + dynamic_uniform_indices + ); + self.pass .set_bind_group(index as u32, bind_group, dynamic_uniform_indices); self.state @@ -169,14 +170,14 @@ impl<'a> TrackedRenderPass<'a> { offset ); return; - } else { - trace!( - "set vertex buffer {}: {:?} ({})", - slot_index, - buffer_slice.id(), - offset - ); } + trace!( + "set vertex buffer {}: {:?} ({})", + slot_index, + buffer_slice.id(), + offset + ); + self.pass .set_vertex_buffer(slot_index as u32, *buffer_slice); self.state @@ -203,9 +204,8 @@ impl<'a> TrackedRenderPass<'a> { offset ); return; - } else { - trace!("set index buffer: {:?} ({})", buffer_slice.id(), offset); } + trace!("set index buffer: {:?} ({})", buffer_slice.id(), offset); self.pass.set_index_buffer(*buffer_slice, index_format); self.state .set_index_buffer(buffer_slice.id(), offset, index_format); @@ -337,6 +337,20 @@ impl<'a> TrackedRenderPass<'a> { .set_viewport(x, y, width, height, min_depth, max_depth); } + /// Set the rendering viewport to the given [`Camera`](crate::camera::Viewport) [`Viewport`]. 
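// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this diff): wiring the new
// `RunGraphOnViewNode` into a render graph so a named sub-graph runs once per
// view entity. "run_my_sub_graph" and "my_sub_graph" are placeholders; how the
// view entity reaches the node's `view` input slot (e.g. a slot edge from the
// graph input) is not shown here.
use crate::render_graph::RenderGraph;

fn add_view_driver(render_graph: &mut RenderGraph) {
    render_graph.add_node("run_my_sub_graph", RunGraphOnViewNode::new("my_sub_graph"));
}
// ---------------------------------------------------------------------------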
+ /// + /// Subsequent draw calls will be projected into that viewport. + pub fn set_camera_viewport(&mut self, viewport: &Viewport) { + self.set_viewport( + viewport.physical_position.x as f32, + viewport.physical_position.y as f32, + viewport.physical_size.x as f32, + viewport.physical_size.y as f32, + viewport.depth.start, + viewport.depth.end, + ); + } + /// Insert a single debug marker. /// /// This is a GPU debugging feature. This has no effect on the rendering itself. diff --git a/crates/bevy_render/src/render_resource/buffer_vec.rs b/crates/bevy_render/src/render_resource/buffer_vec.rs index ad8f5ff2adfe1..6e2deb3009018 100644 --- a/crates/bevy_render/src/render_resource/buffer_vec.rs +++ b/crates/bevy_render/src/render_resource/buffer_vec.rs @@ -14,23 +14,14 @@ pub struct BufferVec { buffer_usage: BufferUsages, } -impl Default for BufferVec { - fn default() -> Self { +impl BufferVec { + pub const fn new(buffer_usage: BufferUsages) -> Self { Self { values: Vec::new(), buffer: None, capacity: 0, - buffer_usage: BufferUsages::all(), item_size: std::mem::size_of::(), - } - } -} - -impl BufferVec { - pub fn new(buffer_usage: BufferUsages) -> Self { - Self { buffer_usage, - ..Default::default() } } diff --git a/crates/bevy_render/src/render_resource/mod.rs b/crates/bevy_render/src/render_resource/mod.rs index 81fa1618dfc8e..c09566f71604b 100644 --- a/crates/bevy_render/src/render_resource/mod.rs +++ b/crates/bevy_render/src/render_resource/mod.rs @@ -8,7 +8,7 @@ mod pipeline_specializer; mod shader; mod storage_buffer; mod texture; -mod uniform_vec; +mod uniform_buffer; pub use bind_group::*; pub use bind_group_layout::*; @@ -20,7 +20,7 @@ pub use pipeline_specializer::*; pub use shader::*; pub use storage_buffer::*; pub use texture::*; -pub use uniform_vec::*; +pub use uniform_buffer::*; // TODO: decide where re-exports should go pub use wgpu::{ @@ -44,6 +44,11 @@ pub use wgpu::{ VertexStepMode, }; -pub use bevy_crevice::*; +pub mod encase { + pub use bevy_encase_derive::ShaderType; + pub use encase::*; +} + +pub use self::encase::{ShaderType, Size as ShaderSize}; pub use naga::ShaderStage; diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 559ff745fa314..71021fbde9cca 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -13,9 +13,12 @@ use bevy_asset::{AssetEvent, Assets, Handle}; use bevy_ecs::event::EventReader; use bevy_ecs::system::{Res, ResMut}; use bevy_utils::{default, tracing::error, Entry, HashMap, HashSet}; -use std::{hash::Hash, mem, ops::Deref, sync::Arc}; +use std::{hash::Hash, iter::FusedIterator, mem, ops::Deref, sync::Arc}; use thiserror::Error; -use wgpu::{PipelineLayoutDescriptor, ShaderModule, VertexBufferLayout as RawVertexBufferLayout}; +use wgpu::{ + BufferBindingType, PipelineLayoutDescriptor, ShaderModule, + VertexBufferLayout as RawVertexBufferLayout, +}; enum PipelineDescriptor { RenderPipelineDescriptor(Box), @@ -117,13 +120,32 @@ impl ShaderCache { let module = match data.processed_shaders.entry(shader_defs.to_vec()) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { + let mut shader_defs = shader_defs.to_vec(); + #[cfg(feature = "webgl")] + shader_defs.push(String::from("NO_ARRAY_TEXTURES_SUPPORT")); + + // TODO: 3 is the value from CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT declared in bevy_pbr + // consider exposing this in shaders in a more generally useful way, such as: + 
// # if AVAILABLE_STORAGE_BUFFER_BINDINGS == 3 + // /* use storage buffers here */ + // # elif + // /* use uniforms here */ + if !matches!( + render_device.get_supported_read_only_binding_type(3), + BufferBindingType::Storage { .. } + ) { + shader_defs.push(String::from("NO_STORAGE_BUFFERS_SUPPORT")); + } + let processed = self.processor.process( shader, - shader_defs, + &shader_defs, &self.shaders, &self.import_path_shaders, )?; - let module_descriptor = match processed.get_module_descriptor() { + let module_descriptor = match processed + .get_module_descriptor(render_device.features()) + { Ok(module_descriptor) => module_descriptor, Err(err) => { return Err(PipelineCacheError::AsModuleDescriptorError(err, processed)); @@ -670,3 +692,5 @@ impl<'a> Iterator for ErrorSources<'a> { current } } + +impl<'a> FusedIterator for ErrorSources<'a> {} diff --git a/crates/bevy_render/src/render_resource/shader.rs b/crates/bevy_render/src/render_resource/shader.rs index 38b8843258805..475654e5a13d2 100644 --- a/crates/bevy_render/src/render_resource/shader.rs +++ b/crates/bevy_render/src/render_resource/shader.rs @@ -2,6 +2,7 @@ use bevy_asset::{AssetLoader, Handle, LoadContext, LoadedAsset}; use bevy_reflect::{TypeUuid, Uuid}; use bevy_utils::{tracing::error, BoxedFuture, HashMap}; use naga::back::wgsl::WriterFlags; +use naga::valid::Capabilities; use naga::{valid::ModuleInfo, Module}; use once_cell::sync::Lazy; use regex::Regex; @@ -9,6 +10,7 @@ use std::{ borrow::Cow, collections::HashSet, marker::Copy, ops::Deref, path::PathBuf, str::FromStr, }; use thiserror::Error; +use wgpu::Features; use wgpu::{util::make_spirv, ShaderModuleDescriptor, ShaderSource}; #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] @@ -125,7 +127,7 @@ impl ProcessedShader { } } - pub fn reflect(&self) -> Result { + pub fn reflect(&self, features: Features) -> Result { let module = match &self { // TODO: process macros here ProcessedShader::Wgsl(source) => naga::front::wgsl::parse_str(source)?, @@ -143,11 +145,23 @@ impl ProcessedShader { }, )?, }; - let module_info = naga::valid::Validator::new( - naga::valid::ValidationFlags::default(), - naga::valid::Capabilities::default(), - ) - .validate(&module)?; + const CAPABILITIES: &[(Features, Capabilities)] = &[ + (Features::PUSH_CONSTANTS, Capabilities::PUSH_CONSTANT), + (Features::SHADER_FLOAT64, Capabilities::FLOAT64), + ( + Features::SHADER_PRIMITIVE_INDEX, + Capabilities::PRIMITIVE_INDEX, + ), + ]; + let mut capabilities = Capabilities::empty(); + for (feature, capability) in CAPABILITIES { + if features.contains(*feature) { + capabilities |= *capability; + } + } + let module_info = + naga::valid::Validator::new(naga::valid::ValidationFlags::default(), capabilities) + .validate(&module)?; Ok(ShaderReflection { module, @@ -155,7 +169,10 @@ impl ProcessedShader { }) } - pub fn get_module_descriptor(&self) -> Result { + pub fn get_module_descriptor( + &self, + features: Features, + ) -> Result { Ok(ShaderModuleDescriptor { label: None, source: match self { @@ -164,12 +181,12 @@ impl ProcessedShader { // Parse and validate the shader early, so that (e.g. while hot reloading) we can // display nicely formatted error messages instead of relying on just displaying the error string // returned by wgpu upon creating the shader module. 
- let _ = self.reflect()?; + let _ = self.reflect(features)?; ShaderSource::Wgsl(source.clone()) } ProcessedShader::Glsl(_source, _stage) => { - let reflection = self.reflect()?; + let reflection = self.reflect(features)?; // TODO: it probably makes more sense to convert this to spirv, but as of writing // this comment, naga's spirv conversion is broken let wgsl = reflection.get_wgsl()?; @@ -379,9 +396,8 @@ impl ShaderProcessor { Source::SpirV(source) => { if shader_defs.is_empty() { return Ok(ProcessedShader::SpirV(source.clone())); - } else { - return Err(ProcessShaderError::ShaderFormatDoesNotSupportShaderDefs); } + return Err(ProcessShaderError::ShaderFormatDoesNotSupportShaderDefs); } }; diff --git a/crates/bevy_render/src/render_resource/storage_buffer.rs b/crates/bevy_render/src/render_resource/storage_buffer.rs index 620676ad81698..96c24a2c090eb 100644 --- a/crates/bevy_render/src/render_resource/storage_buffer.rs +++ b/crates/bevy_render/src/render_resource/storage_buffer.rs @@ -1,214 +1,153 @@ use super::Buffer; use crate::renderer::{RenderDevice, RenderQueue}; -use bevy_crevice::std430::{self, AsStd430, Std430}; -use bevy_utils::tracing::warn; -use std::num::NonZeroU64; -use wgpu::{BindingResource, BufferBinding, BufferDescriptor, BufferUsages}; - -/// A helper for a storage buffer binding with a body, or a variable-sized array, or both. -pub struct StorageBuffer { - body: U, - values: Vec, - scratch: Vec, - storage_buffer: Option, +use encase::{ + internal::WriteInto, DynamicStorageBuffer as DynamicStorageBufferWrapper, ShaderType, + StorageBuffer as StorageBufferWrapper, +}; +use wgpu::{util::BufferInitDescriptor, BindingResource, BufferBinding, BufferUsages}; + +pub struct StorageBuffer { + value: T, + scratch: StorageBufferWrapper>, + buffer: Option, + capacity: usize, } -impl Default for StorageBuffer { - /// Creates a new [`StorageBuffer`] - /// - /// This does not immediately allocate system/video RAM buffers. - fn default() -> Self { +impl From for StorageBuffer { + fn from(value: T) -> Self { Self { - body: U::default(), - values: Vec::new(), - scratch: Vec::new(), - storage_buffer: None, + value, + scratch: StorageBufferWrapper::new(Vec::new()), + buffer: None, + capacity: 0, } } } -impl StorageBuffer { - // NOTE: AsStd430::std430_size_static() uses size_of internally but trait functions cannot be - // marked as const functions - const BODY_SIZE: usize = std::mem::size_of::(); - const ITEM_SIZE: usize = std::mem::size_of::(); +impl Default for StorageBuffer { + fn default() -> Self { + Self { + value: T::default(), + scratch: StorageBufferWrapper::new(Vec::new()), + buffer: None, + capacity: 0, + } + } +} - /// Gets the reference to the underlying buffer, if one has been allocated. 
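For orientation, a hedged usage sketch of the encase-backed `StorageBuffer` introduced here (the `TileCounts` type and the `upload_tile_counts` function are assumptions for illustration; the device and queue handles would normally come from render-app resources):

use bevy_render::render_resource::{ShaderType, StorageBuffer};
use bevy_render::renderer::{RenderDevice, RenderQueue};

// Hypothetical payload; any `ShaderType` value that can be written into a
// storage buffer is handled the same way.
#[derive(Default, ShaderType)]
struct TileCounts {
    count: u32,
}

fn upload_tile_counts(device: &RenderDevice, queue: &RenderQueue) {
    let mut buffer = StorageBuffer::<TileCounts>::default();
    buffer.get_mut().count = 42;
    // Serializes into the scratch buffer and (re)creates the GPU buffer only
    // when the serialized size outgrows the current capacity.
    buffer.write_buffer(device, queue);
    // `buffer.binding()` is then what a bind group entry would consume.
}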
+impl StorageBuffer { #[inline] pub fn buffer(&self) -> Option<&Buffer> { - self.storage_buffer.as_ref() + self.buffer.as_ref() } #[inline] pub fn binding(&self) -> Option { - Some(BindingResource::Buffer(BufferBinding { - buffer: self.buffer()?, - offset: 0, - size: Some(NonZeroU64::new((self.size()) as u64).unwrap()), - })) + Some(BindingResource::Buffer( + self.buffer()?.as_entire_buffer_binding(), + )) } - #[inline] - pub fn set_body(&mut self, body: U) { - self.body = body; + pub fn set(&mut self, value: T) { + self.value = value; } - fn reserve_buffer(&mut self, device: &RenderDevice) -> bool { - let size = self.size(); - if self.storage_buffer.is_none() || size > self.scratch.len() { - self.scratch.resize(size, 0); - self.storage_buffer = Some(device.create_buffer(&BufferDescriptor { - label: None, - size: size as wgpu::BufferAddress, - usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, - mapped_at_creation: false, - })); - true - } else { - false - } + pub fn get(&self) -> &T { + &self.value } - fn size(&self) -> usize { - let mut size = 0; - size += Self::BODY_SIZE; - if Self::ITEM_SIZE > 0 { - if size > 0 { - // Pad according to the array item type's alignment - size = (size + ::Output::ALIGNMENT - 1) - & !(::Output::ALIGNMENT - 1); - } - // Variable size arrays must have at least 1 element - size += Self::ITEM_SIZE * self.values.len().max(1); - } - size + pub fn get_mut(&mut self) -> &mut T { + &mut self.value } pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { - self.reserve_buffer(device); - if let Some(storage_buffer) = &self.storage_buffer { - let range = 0..self.size(); - let mut writer = std430::Writer::new(&mut self.scratch[range.clone()]); - let mut offset = 0; - // First write the struct body if there is one - if Self::BODY_SIZE > 0 { - if let Ok(new_offset) = writer.write(&self.body).map_err(|e| warn!("{:?}", e)) { - offset = new_offset; - } - } - if Self::ITEM_SIZE > 0 { - if self.values.is_empty() { - // Zero-out the padding and dummy array item in the case of the array being empty - for i in offset..self.size() { - self.scratch[i] = 0; - } - } else { - // Then write the array. 
Note that padding bytes may be added between the body - // and the array in order to align the array to the alignment requirements of its - // items - writer - .write(self.values.as_slice()) - .map_err(|e| warn!("{:?}", e)) - .ok(); - } - } - queue.write_buffer(storage_buffer, 0, &self.scratch[range]); + self.scratch.write(&self.value).unwrap(); + + let size = self.scratch.as_ref().len(); + + if self.capacity < size { + self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { + label: None, + usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, + contents: self.scratch.as_ref(), + })); + self.capacity = size; + } else if let Some(buffer) = &self.buffer { + queue.write_buffer(buffer, 0, self.scratch.as_ref()); } } +} - pub fn values(&self) -> &[T] { - &self.values - } +pub struct DynamicStorageBuffer { + values: Vec, + scratch: DynamicStorageBufferWrapper>, + buffer: Option, + capacity: usize, +} - pub fn values_mut(&mut self) -> &mut [T] { - &mut self.values +impl Default for DynamicStorageBuffer { + fn default() -> Self { + Self { + values: Vec::new(), + scratch: DynamicStorageBufferWrapper::new(Vec::new()), + buffer: None, + capacity: 0, + } } +} +impl DynamicStorageBuffer { #[inline] - pub fn clear(&mut self) { - self.values.clear(); + pub fn buffer(&self) -> Option<&Buffer> { + self.buffer.as_ref() } #[inline] - pub fn push(&mut self, value: T) { - self.values.push(value); + pub fn binding(&self) -> Option { + Some(BindingResource::Buffer(BufferBinding { + buffer: self.buffer()?, + offset: 0, + size: Some(T::min_size()), + })) } #[inline] - pub fn append(&mut self, values: &mut Vec) { - self.values.append(values); - } -} - -#[cfg(test)] -mod tests { - use super::StorageBuffer; - use bevy_crevice::std430; - use bevy_crevice::std430::AsStd430; - use bevy_crevice::std430::Std430; - use bevy_math::Vec3; - use bevy_math::Vec4; - - //Note: - //A Vec3 has 12 bytes and needs to be padded to 16 bytes, when converted to std430 - //https://www.w3.org/TR/WGSL/#alignment-and-size - #[derive(AsStd430, Default)] - struct NotInherentlyAligned { - data: Vec3, + pub fn len(&self) -> usize { + self.values.len() } - //Note: - //A Vec4 has 16 bytes and does not need to be padded to fit in std430 - //https://www.w3.org/TR/WGSL/#alignment-and-size - #[derive(AsStd430)] - struct InherentlyAligned { - data: Vec4, + #[inline] + pub fn is_empty(&self) -> bool { + self.values.is_empty() } - #[test] - fn storage_buffer_correctly_sized_nonaligned() { - let mut buffer: StorageBuffer = StorageBuffer::default(); - buffer.push(NotInherentlyAligned { data: Vec3::ONE }); - - let actual_size = buffer.size(); - - let data = [NotInherentlyAligned { data: Vec3::ONE }].as_std430(); - let data_as_bytes = data.as_bytes(); - - assert_eq!(actual_size, data_as_bytes.len()); + #[inline] + pub fn push(&mut self, value: T) -> u32 { + let offset = self.scratch.write(&value).unwrap() as u32; + self.values.push(value); + offset } - #[test] - fn storage_buffer_correctly_sized_aligned() { - let mut buffer: StorageBuffer = StorageBuffer::default(); - buffer.push(InherentlyAligned { data: Vec4::ONE }); - - let actual_size = buffer.size(); - - let data = [InherentlyAligned { data: Vec4::ONE }].as_std430(); - let data_as_bytes = data.as_bytes(); + #[inline] + pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { + let size = self.scratch.as_ref().len(); - assert_eq!(actual_size, data_as_bytes.len()); + if self.capacity < size { + self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor 
{ + label: None, + usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, + contents: self.scratch.as_ref(), + })); + self.capacity = size; + } else if let Some(buffer) = &self.buffer { + queue.write_buffer(buffer, 0, self.scratch.as_ref()); + } } - #[test] - fn storage_buffer_correctly_sized_item_and_body() { - let mut buffer: StorageBuffer = - StorageBuffer::default(); - buffer.push(NotInherentlyAligned { data: Vec3::ONE }); - buffer.set_body(NotInherentlyAligned { data: Vec3::ONE }); - - let calculated_size = buffer.size(); - - //Emulate Write - let mut scratch = Vec::::new(); - scratch.resize(calculated_size, 0); - let mut writer = std430::Writer::new(&mut scratch[0..calculated_size]); - writer - .write(&buffer.body) - .expect("Buffer has enough space to write the body."); - writer - .write(buffer.values.as_slice()) - .expect("Buffer has enough space to write the values."); + #[inline] + pub fn clear(&mut self) { + self.values.clear(); + self.scratch.as_mut().clear(); + self.scratch.set_offset(0); } } diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs new file mode 100644 index 0000000000000..6f9ad642dcc48 --- /dev/null +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -0,0 +1,150 @@ +use crate::{ + render_resource::Buffer, + renderer::{RenderDevice, RenderQueue}, +}; +use encase::{ + internal::WriteInto, DynamicUniformBuffer as DynamicUniformBufferWrapper, ShaderType, + UniformBuffer as UniformBufferWrapper, +}; +use wgpu::{util::BufferInitDescriptor, BindingResource, BufferBinding, BufferUsages}; + +pub struct UniformBuffer { + value: T, + scratch: UniformBufferWrapper>, + buffer: Option, +} + +impl From for UniformBuffer { + fn from(value: T) -> Self { + Self { + value, + scratch: UniformBufferWrapper::new(Vec::new()), + buffer: None, + } + } +} + +impl Default for UniformBuffer { + fn default() -> Self { + Self { + value: T::default(), + scratch: UniformBufferWrapper::new(Vec::new()), + buffer: None, + } + } +} + +impl UniformBuffer { + #[inline] + pub fn buffer(&self) -> Option<&Buffer> { + self.buffer.as_ref() + } + + #[inline] + pub fn binding(&self) -> Option { + Some(BindingResource::Buffer( + self.buffer()?.as_entire_buffer_binding(), + )) + } + + pub fn set(&mut self, value: T) { + self.value = value; + } + + pub fn get(&self) -> &T { + &self.value + } + + pub fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { + self.scratch.write(&self.value).unwrap(); + + match &self.buffer { + Some(buffer) => queue.write_buffer(buffer, 0, self.scratch.as_ref()), + None => { + self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { + label: None, + usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, + contents: self.scratch.as_ref(), + })); + } + } + } +} + +pub struct DynamicUniformBuffer { + values: Vec, + scratch: DynamicUniformBufferWrapper>, + buffer: Option, + capacity: usize, +} + +impl Default for DynamicUniformBuffer { + fn default() -> Self { + Self { + values: Vec::new(), + scratch: DynamicUniformBufferWrapper::new(Vec::new()), + buffer: None, + capacity: 0, + } + } +} + +impl DynamicUniformBuffer { + #[inline] + pub fn buffer(&self) -> Option<&Buffer> { + self.buffer.as_ref() + } + + #[inline] + pub fn binding(&self) -> Option { + Some(BindingResource::Buffer(BufferBinding { + buffer: self.buffer()?, + offset: 0, + size: Some(T::min_size()), + })) + } + + #[inline] + pub fn 
len(&self) -> usize { + self.values.len() + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } + + #[inline] + pub fn push(&mut self, value: T) -> u32 { + let offset = self.scratch.write(&value).unwrap() as u32; + self.values.push(value); + offset + } + + #[inline] + pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { + let size = self.scratch.as_ref().len(); + + if self.capacity < size { + self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { + label: None, + usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, + contents: self.scratch.as_ref(), + })); + self.capacity = size; + } else if let Some(buffer) = &self.buffer { + queue.write_buffer(buffer, 0, self.scratch.as_ref()); + } + } + + #[inline] + pub fn clear(&mut self) { + self.values.clear(); + self.scratch.as_mut().clear(); + self.scratch.set_offset(0); + } +} diff --git a/crates/bevy_render/src/render_resource/uniform_vec.rs b/crates/bevy_render/src/render_resource/uniform_vec.rs deleted file mode 100644 index f31e7e258d6a2..0000000000000 --- a/crates/bevy_render/src/render_resource/uniform_vec.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - render_resource::std140::{self, AsStd140, DynamicUniform, Std140}, - render_resource::Buffer, - renderer::{RenderDevice, RenderQueue}, -}; -use std::num::NonZeroU64; -use wgpu::{BindingResource, BufferBinding, BufferDescriptor, BufferUsages}; - -pub struct UniformVec { - values: Vec, - scratch: Vec, - uniform_buffer: Option, - capacity: usize, - item_size: usize, -} - -impl Default for UniformVec { - fn default() -> Self { - Self { - values: Vec::new(), - scratch: Vec::new(), - uniform_buffer: None, - capacity: 0, - item_size: (T::std140_size_static() + ::Output::ALIGNMENT - 1) - & !(::Output::ALIGNMENT - 1), - } - } -} - -impl UniformVec { - #[inline] - pub fn uniform_buffer(&self) -> Option<&Buffer> { - self.uniform_buffer.as_ref() - } - - #[inline] - pub fn binding(&self) -> Option { - Some(BindingResource::Buffer(BufferBinding { - buffer: self.uniform_buffer()?, - offset: 0, - size: Some(NonZeroU64::new(self.item_size as u64).unwrap()), - })) - } - - #[inline] - pub fn len(&self) -> usize { - self.values.len() - } - - #[inline] - pub fn is_empty(&self) -> bool { - self.values.is_empty() - } - - #[inline] - pub fn capacity(&self) -> usize { - self.capacity - } - - pub fn push(&mut self, value: T) -> usize { - let index = self.values.len(); - self.values.push(value); - index - } - - pub fn get_mut(&mut self, index: usize) -> &mut T { - &mut self.values[index] - } - - pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) -> bool { - if capacity > self.capacity { - self.capacity = capacity; - let size = self.item_size * capacity; - self.scratch.resize(size, 0); - self.uniform_buffer = Some(device.create_buffer(&BufferDescriptor { - label: None, - size: size as wgpu::BufferAddress, - usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, - mapped_at_creation: false, - })); - true - } else { - false - } - } - - pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { - if self.values.is_empty() { - return; - } - self.reserve(self.values.len(), device); - if let Some(uniform_buffer) = &self.uniform_buffer { - let range = 0..self.item_size * self.values.len(); - let mut writer = std140::Writer::new(&mut self.scratch[range.clone()]); - writer.write(self.values.as_slice()).unwrap(); - queue.write_buffer(uniform_buffer, 0, &self.scratch[range]); - } - } - - pub fn clear(&mut self) { - 
self.values.clear(); - } - - pub fn values(&self) -> &[T] { - &self.values - } -} - -pub struct DynamicUniformVec { - uniform_vec: UniformVec>, -} - -impl Default for DynamicUniformVec { - fn default() -> Self { - Self { - uniform_vec: Default::default(), - } - } -} - -impl DynamicUniformVec { - #[inline] - pub fn uniform_buffer(&self) -> Option<&Buffer> { - self.uniform_vec.uniform_buffer() - } - - #[inline] - pub fn binding(&self) -> Option { - self.uniform_vec.binding() - } - - #[inline] - pub fn len(&self) -> usize { - self.uniform_vec.len() - } - - #[inline] - pub fn is_empty(&self) -> bool { - self.uniform_vec.is_empty() - } - - #[inline] - pub fn capacity(&self) -> usize { - self.uniform_vec.capacity() - } - - #[inline] - pub fn push(&mut self, value: T) -> u32 { - (self.uniform_vec.push(DynamicUniform(value)) * self.uniform_vec.item_size) as u32 - } - - #[inline] - pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) { - self.uniform_vec.reserve(capacity, device); - } - - #[inline] - pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { - self.uniform_vec.write_buffer(device, queue); - } - - #[inline] - pub fn clear(&mut self) { - self.uniform_vec.clear(); - } -} diff --git a/crates/bevy_render/src/renderer/graph_runner.rs b/crates/bevy_render/src/renderer/graph_runner.rs index 413642bc3c07f..3ed1b0e971fd5 100644 --- a/crates/bevy_render/src/renderer/graph_runner.rs +++ b/crates/bevy_render/src/renderer/graph_runner.rs @@ -41,6 +41,14 @@ pub enum RenderGraphRunnerError { expected: SlotType, actual: SlotType, }, + #[error( + "node (name: '{node_name:?}') has {slot_count} input slots, but was provided {value_count} values" + )] + MismatchedInputCount { + node_name: Option>, + slot_count: usize, + value_count: usize, + }, } impl RenderGraphRunner { @@ -101,9 +109,8 @@ impl RenderGraphRunner { expected: input_slot.slot_type, label: input_slot.name.clone().into(), }); - } else { - input_values.push(input_value.clone()); } + input_values.push(input_value.clone()); } else { return Err(RenderGraphRunnerError::MissingInput { slot_index: i, @@ -162,7 +169,13 @@ impl RenderGraphRunner { .map(|(_, value)| value) .collect(); - assert_eq!(inputs.len(), node_state.input_slots.len()); + if inputs.len() != node_state.input_slots.len() { + return Err(RenderGraphRunnerError::MismatchedInputCount { + node_name: node_state.name.clone(), + slot_count: node_state.input_slots.len(), + value_count: inputs.len(), + }); + } let mut outputs: SmallVec<[Option; 4]> = smallvec![None; node_state.output_slots.len()]; diff --git a/crates/bevy_render/src/texture/basis.rs b/crates/bevy_render/src/texture/basis.rs index 34e817c95ed0b..256bbf8d79ce3 100644 --- a/crates/bevy_render/src/texture/basis.rs +++ b/crates/bevy_render/src/texture/basis.rs @@ -114,9 +114,9 @@ pub fn basis_buffer_to_image( image.texture_descriptor.mip_level_count = image0_mip_level_count; image.texture_descriptor.format = texture_format; image.texture_descriptor.dimension = match texture_type { - BasisTextureType::TextureType2D => TextureDimension::D2, - BasisTextureType::TextureType2DArray => TextureDimension::D2, - BasisTextureType::TextureTypeCubemapArray => TextureDimension::D2, + BasisTextureType::TextureType2D + | BasisTextureType::TextureType2DArray + | BasisTextureType::TextureTypeCubemapArray => TextureDimension::D2, BasisTextureType::TextureTypeVolume => TextureDimension::D3, basis_texture_type => { return Err(TextureError::UnsupportedTextureFormat(format!( diff --git 
a/crates/bevy_render/src/texture/dds.rs b/crates/bevy_render/src/texture/dds.rs index 49db9d090c07e..e32059188511c 100644 --- a/crates/bevy_render/src/texture/dds.rs +++ b/crates/bevy_render/src/texture/dds.rs @@ -74,14 +74,14 @@ pub fn dds_format_to_texture_format( TextureFormat::Bc1RgbaUnorm } } - D3DFormat::DXT3 => { + D3DFormat::DXT3 | D3DFormat::DXT2 => { if is_srgb { TextureFormat::Bc2RgbaUnormSrgb } else { TextureFormat::Bc2RgbaUnorm } } - D3DFormat::DXT5 => { + D3DFormat::DXT5 | D3DFormat::DXT4 => { if is_srgb { TextureFormat::Bc3RgbaUnormSrgb } else { @@ -96,20 +96,6 @@ pub fn dds_format_to_texture_format( D3DFormat::R32F => TextureFormat::R32Float, D3DFormat::G32R32F => TextureFormat::Rg32Float, D3DFormat::A32B32G32R32F => TextureFormat::Rgba32Float, - D3DFormat::DXT2 => { - if is_srgb { - TextureFormat::Bc2RgbaUnormSrgb - } else { - TextureFormat::Bc2RgbaUnorm - } - } - D3DFormat::DXT4 => { - if is_srgb { - TextureFormat::Bc3RgbaUnormSrgb - } else { - TextureFormat::Bc3RgbaUnorm - } - } D3DFormat::A1R5G5B5 | D3DFormat::R5G6B5 // FIXME: Map to argb format and user has to know to ignore the alpha channel? @@ -136,38 +122,28 @@ pub fn dds_format_to_texture_format( } } else if let Some(dxgi_format) = dds.get_dxgi_format() { match dxgi_format { - DxgiFormat::R32G32B32A32_Typeless => TextureFormat::Rgba32Float, - DxgiFormat::R32G32B32A32_Float => TextureFormat::Rgba32Float, + DxgiFormat::R32G32B32A32_Typeless | DxgiFormat::R32G32B32A32_Float => { + TextureFormat::Rgba32Float + } DxgiFormat::R32G32B32A32_UInt => TextureFormat::Rgba32Uint, DxgiFormat::R32G32B32A32_SInt => TextureFormat::Rgba32Sint, - DxgiFormat::R16G16B16A16_Typeless => TextureFormat::Rgba16Float, - DxgiFormat::R16G16B16A16_Float => TextureFormat::Rgba16Float, + DxgiFormat::R16G16B16A16_Typeless | DxgiFormat::R16G16B16A16_Float => { + TextureFormat::Rgba16Float + } DxgiFormat::R16G16B16A16_UNorm => TextureFormat::Rgba16Unorm, DxgiFormat::R16G16B16A16_UInt => TextureFormat::Rgba16Uint, DxgiFormat::R16G16B16A16_SNorm => TextureFormat::Rgba16Snorm, DxgiFormat::R16G16B16A16_SInt => TextureFormat::Rgba16Sint, - DxgiFormat::R32G32_Typeless => TextureFormat::Rg32Float, - DxgiFormat::R32G32_Float => TextureFormat::Rg32Float, + DxgiFormat::R32G32_Typeless | DxgiFormat::R32G32_Float => TextureFormat::Rg32Float, DxgiFormat::R32G32_UInt => TextureFormat::Rg32Uint, DxgiFormat::R32G32_SInt => TextureFormat::Rg32Sint, - DxgiFormat::R10G10B10A2_Typeless => TextureFormat::Rgb10a2Unorm, - DxgiFormat::R10G10B10A2_UNorm => TextureFormat::Rgb10a2Unorm, - DxgiFormat::R11G11B10_Float => TextureFormat::Rg11b10Float, - DxgiFormat::R8G8B8A8_Typeless => { - if is_srgb { - TextureFormat::Rgba8UnormSrgb - } else { - TextureFormat::Rgba8Unorm - } - } - DxgiFormat::R8G8B8A8_UNorm => { - if is_srgb { - TextureFormat::Rgba8UnormSrgb - } else { - TextureFormat::Rgba8Unorm - } + DxgiFormat::R10G10B10A2_Typeless | DxgiFormat::R10G10B10A2_UNorm => { + TextureFormat::Rgb10a2Unorm } - DxgiFormat::R8G8B8A8_UNorm_sRGB => { + DxgiFormat::R11G11B10_Float => TextureFormat::Rg11b10Float, + DxgiFormat::R8G8B8A8_Typeless + | DxgiFormat::R8G8B8A8_UNorm + | DxgiFormat::R8G8B8A8_UNorm_sRGB => { if is_srgb { TextureFormat::Rgba8UnormSrgb } else { @@ -177,121 +153,61 @@ pub fn dds_format_to_texture_format( DxgiFormat::R8G8B8A8_UInt => TextureFormat::Rgba8Uint, DxgiFormat::R8G8B8A8_SNorm => TextureFormat::Rgba8Snorm, DxgiFormat::R8G8B8A8_SInt => TextureFormat::Rgba8Sint, - DxgiFormat::R16G16_Typeless => TextureFormat::Rg16Float, - DxgiFormat::R16G16_Float => 
TextureFormat::Rg16Float, + DxgiFormat::R16G16_Typeless | DxgiFormat::R16G16_Float => TextureFormat::Rg16Float, DxgiFormat::R16G16_UNorm => TextureFormat::Rg16Unorm, DxgiFormat::R16G16_UInt => TextureFormat::Rg16Uint, DxgiFormat::R16G16_SNorm => TextureFormat::Rg16Snorm, DxgiFormat::R16G16_SInt => TextureFormat::Rg16Sint, - DxgiFormat::R32_Typeless => TextureFormat::R32Float, + DxgiFormat::R32_Typeless | DxgiFormat::R32_Float => TextureFormat::R32Float, DxgiFormat::D32_Float => TextureFormat::Depth32Float, - DxgiFormat::R32_Float => TextureFormat::R32Float, DxgiFormat::R32_UInt => TextureFormat::R32Uint, DxgiFormat::R32_SInt => TextureFormat::R32Sint, - DxgiFormat::R24G8_Typeless => TextureFormat::Depth24PlusStencil8, - DxgiFormat::D24_UNorm_S8_UInt => TextureFormat::Depth24PlusStencil8, + DxgiFormat::R24G8_Typeless | DxgiFormat::D24_UNorm_S8_UInt => { + TextureFormat::Depth24PlusStencil8 + } DxgiFormat::R24_UNorm_X8_Typeless => TextureFormat::Depth24Plus, - DxgiFormat::R8G8_Typeless => TextureFormat::Rg8Unorm, - DxgiFormat::R8G8_UNorm => TextureFormat::Rg8Unorm, + DxgiFormat::R8G8_Typeless | DxgiFormat::R8G8_UNorm => TextureFormat::Rg8Unorm, DxgiFormat::R8G8_UInt => TextureFormat::Rg8Uint, DxgiFormat::R8G8_SNorm => TextureFormat::Rg8Snorm, DxgiFormat::R8G8_SInt => TextureFormat::Rg8Sint, - DxgiFormat::R16_Typeless => TextureFormat::R16Float, - DxgiFormat::R16_Float => TextureFormat::R16Float, + DxgiFormat::R16_Typeless | DxgiFormat::R16_Float => TextureFormat::R16Float, DxgiFormat::R16_UNorm => TextureFormat::R16Unorm, DxgiFormat::R16_UInt => TextureFormat::R16Uint, DxgiFormat::R16_SNorm => TextureFormat::R16Snorm, DxgiFormat::R16_SInt => TextureFormat::R16Sint, - DxgiFormat::R8_Typeless => TextureFormat::R8Unorm, - DxgiFormat::R8_UNorm => TextureFormat::R8Unorm, + DxgiFormat::R8_Typeless | DxgiFormat::R8_UNorm => TextureFormat::R8Unorm, DxgiFormat::R8_UInt => TextureFormat::R8Uint, DxgiFormat::R8_SNorm => TextureFormat::R8Snorm, DxgiFormat::R8_SInt => TextureFormat::R8Sint, DxgiFormat::R9G9B9E5_SharedExp => TextureFormat::Rgb9e5Ufloat, - DxgiFormat::BC1_Typeless => { - if is_srgb { - TextureFormat::Bc1RgbaUnormSrgb - } else { - TextureFormat::Bc1RgbaUnorm - } - } - DxgiFormat::BC1_UNorm => { + DxgiFormat::BC1_Typeless | DxgiFormat::BC1_UNorm | DxgiFormat::BC1_UNorm_sRGB => { if is_srgb { TextureFormat::Bc1RgbaUnormSrgb } else { TextureFormat::Bc1RgbaUnorm } } - DxgiFormat::BC1_UNorm_sRGB => { - if is_srgb { - TextureFormat::Bc1RgbaUnormSrgb - } else { - TextureFormat::Bc1RgbaUnorm - } - } - DxgiFormat::BC2_Typeless => { - if is_srgb { - TextureFormat::Bc2RgbaUnormSrgb - } else { - TextureFormat::Bc2RgbaUnorm - } - } - DxgiFormat::BC2_UNorm => { + DxgiFormat::BC2_Typeless | DxgiFormat::BC2_UNorm | DxgiFormat::BC2_UNorm_sRGB => { if is_srgb { TextureFormat::Bc2RgbaUnormSrgb } else { TextureFormat::Bc2RgbaUnorm } } - DxgiFormat::BC2_UNorm_sRGB => { - if is_srgb { - TextureFormat::Bc2RgbaUnormSrgb - } else { - TextureFormat::Bc2RgbaUnorm - } - } - DxgiFormat::BC3_Typeless => { - if is_srgb { - TextureFormat::Bc3RgbaUnormSrgb - } else { - TextureFormat::Bc3RgbaUnorm - } - } - DxgiFormat::BC3_UNorm => { + DxgiFormat::BC3_Typeless | DxgiFormat::BC3_UNorm | DxgiFormat::BC3_UNorm_sRGB => { if is_srgb { TextureFormat::Bc3RgbaUnormSrgb } else { TextureFormat::Bc3RgbaUnorm } } - DxgiFormat::BC3_UNorm_sRGB => { - if is_srgb { - TextureFormat::Bc3RgbaUnormSrgb - } else { - TextureFormat::Bc3RgbaUnorm - } - } - DxgiFormat::BC4_Typeless => TextureFormat::Bc4RUnorm, - DxgiFormat::BC4_UNorm => 
TextureFormat::Bc4RUnorm, + DxgiFormat::BC4_Typeless | DxgiFormat::BC4_UNorm => TextureFormat::Bc4RUnorm, DxgiFormat::BC4_SNorm => TextureFormat::Bc4RSnorm, - DxgiFormat::BC5_Typeless => TextureFormat::Bc5RgUnorm, - DxgiFormat::BC5_UNorm => TextureFormat::Bc5RgUnorm, + DxgiFormat::BC5_Typeless | DxgiFormat::BC5_UNorm => TextureFormat::Bc5RgUnorm, DxgiFormat::BC5_SNorm => TextureFormat::Bc5RgSnorm, - DxgiFormat::B8G8R8A8_UNorm => { - if is_srgb { - TextureFormat::Bgra8UnormSrgb - } else { - TextureFormat::Bgra8Unorm - } - } - DxgiFormat::B8G8R8A8_Typeless => { - if is_srgb { - TextureFormat::Bgra8UnormSrgb - } else { - TextureFormat::Bgra8Unorm - } - } - DxgiFormat::B8G8R8A8_UNorm_sRGB => { + DxgiFormat::B8G8R8A8_UNorm + | DxgiFormat::B8G8R8A8_Typeless + | DxgiFormat::B8G8R8A8_UNorm_sRGB => { if is_srgb { TextureFormat::Bgra8UnormSrgb } else { @@ -299,24 +215,9 @@ pub fn dds_format_to_texture_format( } } - DxgiFormat::BC6H_Typeless => TextureFormat::Bc6hRgbUfloat, - DxgiFormat::BC6H_UF16 => TextureFormat::Bc6hRgbUfloat, + DxgiFormat::BC6H_Typeless | DxgiFormat::BC6H_UF16 => TextureFormat::Bc6hRgbUfloat, DxgiFormat::BC6H_SF16 => TextureFormat::Bc6hRgbSfloat, - DxgiFormat::BC7_Typeless => { - if is_srgb { - TextureFormat::Bc7RgbaUnormSrgb - } else { - TextureFormat::Bc7RgbaUnorm - } - } - DxgiFormat::BC7_UNorm => { - if is_srgb { - TextureFormat::Bc7RgbaUnormSrgb - } else { - TextureFormat::Bc7RgbaUnorm - } - } - DxgiFormat::BC7_UNorm_sRGB => { + DxgiFormat::BC7_Typeless | DxgiFormat::BC7_UNorm | DxgiFormat::BC7_UNorm_sRGB => { if is_srgb { TextureFormat::Bc7RgbaUnormSrgb } else { diff --git a/crates/bevy_render/src/texture/hdr_texture_loader.rs b/crates/bevy_render/src/texture/hdr_texture_loader.rs index 09890c0033a2a..81c539a061664 100644 --- a/crates/bevy_render/src/texture/hdr_texture_loader.rs +++ b/crates/bevy_render/src/texture/hdr_texture_loader.rs @@ -22,7 +22,7 @@ impl AssetLoader for HdrTextureLoader { "Format should have 32bit x 4 size" ); - let decoder = image::hdr::HdrDecoder::new(bytes)?; + let decoder = image::codecs::hdr::HdrDecoder::new(bytes)?; let info = decoder.metadata(); let rgb_data = decoder.read_image_hdr()?; let mut rgba_data = Vec::with_capacity(rgb_data.len() * format.pixel_size()); diff --git a/crates/bevy_render/src/texture/image.rs b/crates/bevy_render/src/texture/image.rs index f9c3bf9207e1e..bec554b1f6b57 100644 --- a/crates/bevy_render/src/texture/image.rs +++ b/crates/bevy_render/src/texture/image.rs @@ -13,9 +13,11 @@ use crate::{ texture::BevyDefault, }; use bevy_asset::HandleUntyped; +use bevy_derive::{Deref, DerefMut}; use bevy_ecs::system::{lifetimeless::SRes, SystemParamItem}; use bevy_math::Vec2; use bevy_reflect::TypeUuid; +use std::hash::Hash; use thiserror::Error; use wgpu::{ Extent3d, ImageCopyTexture, ImageDataLayout, Origin3d, TextureDimension, TextureFormat, @@ -49,14 +51,12 @@ pub enum ImageFormat { impl ImageFormat { pub fn from_mime_type(mime_type: &str) -> Option { Some(match mime_type.to_ascii_lowercase().as_str() { - "image/bmp" => ImageFormat::Bmp, - "image/x-bmp" => ImageFormat::Bmp, + "image/bmp" | "image/x-bmp" => ImageFormat::Bmp, "image/vnd-ms.dds" => ImageFormat::Dds, "image/jpeg" => ImageFormat::Jpeg, "image/ktx2" => ImageFormat::Ktx2, "image/png" => ImageFormat::Png, - "image/x-targa" => ImageFormat::Tga, - "image/x-tga" => ImageFormat::Tga, + "image/x-targa" | "image/x-tga" => ImageFormat::Tga, _ => return None, }) } @@ -85,7 +85,6 @@ impl ImageFormat { pub fn as_image_crate_format(&self) -> Option { Some(match self { 
ImageFormat::Avif => image::ImageFormat::Avif, - ImageFormat::Basis => return None, ImageFormat::Bmp => image::ImageFormat::Bmp, ImageFormat::Dds => image::ImageFormat::Dds, ImageFormat::Farbfeld => image::ImageFormat::Farbfeld, @@ -93,12 +92,12 @@ impl ImageFormat { ImageFormat::Hdr => image::ImageFormat::Hdr, ImageFormat::Ico => image::ImageFormat::Ico, ImageFormat::Jpeg => image::ImageFormat::Jpeg, - ImageFormat::Ktx2 => return None, ImageFormat::Png => image::ImageFormat::Png, ImageFormat::Pnm => image::ImageFormat::Pnm, ImageFormat::Tga => image::ImageFormat::Tga, ImageFormat::Tiff => image::ImageFormat::Tiff, ImageFormat::WebP => image::ImageFormat::WebP, + ImageFormat::Basis | ImageFormat::Ktx2 => return None, }) } } @@ -109,9 +108,49 @@ pub struct Image { pub data: Vec, // TODO: this nesting makes accessing Image metadata verbose. Either flatten out descriptor or add accessors pub texture_descriptor: wgpu::TextureDescriptor<'static>, - pub sampler_descriptor: wgpu::SamplerDescriptor<'static>, + pub sampler_descriptor: ImageSampler, } +/// Used in `Image`, this determines what image sampler to use when rendering. The default setting, +/// [`ImageSampler::Default`], will result in reading the sampler set in the [`DefaultImageSampler`] +/// resource - the global default sampler - at runtime. Setting this to [`ImageSampler::Descriptor`] +/// will override the global default descriptor for this [`Image`]. +#[derive(Debug, Clone)] +pub enum ImageSampler { + Default, + Descriptor(wgpu::SamplerDescriptor<'static>), +} +impl Default for ImageSampler { + fn default() -> Self { + Self::Default + } +} + +impl ImageSampler { + /// Returns a sampler descriptor with `Linear` min and mag filters + pub fn linear_descriptor() -> wgpu::SamplerDescriptor<'static> { + wgpu::SamplerDescriptor { + mag_filter: wgpu::FilterMode::Linear, + min_filter: wgpu::FilterMode::Linear, + ..Default::default() + } + } + + /// Returns a sampler descriptor with `Nearest` min and mag filters + pub fn nearest_descriptor() -> wgpu::SamplerDescriptor<'static> { + wgpu::SamplerDescriptor { + mag_filter: wgpu::FilterMode::Nearest, + min_filter: wgpu::FilterMode::Nearest, + ..Default::default() + } + } +} + +/// Resource used as the global default image sampler for [`Image`]s with their `sampler_descriptor` +/// set to [`ImageSampler::Default`]. 
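For reference, a small sketch of overriding the sampler on a single `Image` via the new `ImageSampler::Descriptor` variant, using the `nearest_descriptor` helper defined above (the `make_pixel_art` function is hypothetical):

use bevy_render::texture::{Image, ImageSampler};

// Hypothetical: force nearest filtering for one texture while every other
// image keeps the global default sampler (ImageSampler::Default).
fn make_pixel_art(mut image: Image) -> Image {
    image.sampler_descriptor = ImageSampler::Descriptor(ImageSampler::nearest_descriptor());
    image
}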
+#[derive(Debug, Clone, Deref, DerefMut)] +pub struct DefaultImageSampler(pub(crate) Sampler); + impl Default for Image { fn default() -> Self { let format = wgpu::TextureFormat::bevy_default(); @@ -131,7 +170,7 @@ impl Default for Image { sample_count: 1, usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, }, - sampler_descriptor: wgpu::SamplerDescriptor::default(), + sampler_descriptor: ImageSampler::Default, } } } @@ -260,7 +299,6 @@ impl Image { /// - `TextureFormat::R8Unorm` /// - `TextureFormat::Rg8Unorm` /// - `TextureFormat::Rgba8UnormSrgb` - /// - `TextureFormat::Bgra8UnormSrgb` pub fn convert(&self, new_format: TextureFormat) -> Option { super::image_texture_conversion::texture_to_image(self) .and_then(|img| match new_format { @@ -274,9 +312,6 @@ impl Image { TextureFormat::Rgba8UnormSrgb => { Some((image::DynamicImage::ImageRgba8(img.into_rgba8()), true)) } - TextureFormat::Bgra8UnormSrgb => { - Some((image::DynamicImage::ImageBgra8(img.into_bgra8()), true)) - } _ => None, }) .map(|(dyn_img, is_srgb)| { @@ -338,11 +373,11 @@ impl Image { #[derive(Clone, Copy, Debug)] pub enum DataFormat { - R8, - Rg8, - Rgb8, - Rgba8, - Rgba16Float, + Rgb, + Rgba, + Rrr, + Rrrg, + Rg, } #[derive(Clone, Copy, Debug)] @@ -427,6 +462,7 @@ pub trait TextureFormatPixelInfo { } impl TextureFormatPixelInfo for TextureFormat { + #[allow(clippy::match_same_arms)] fn pixel_info(&self) -> PixelInfo { let type_size = match self { // 8bit @@ -546,7 +582,11 @@ pub struct GpuImage { impl RenderAsset for Image { type ExtractedAsset = Image; type PreparedAsset = GpuImage; - type Param = (SRes, SRes); + type Param = ( + SRes, + SRes, + SRes, + ); /// Clones the Image. fn extract_asset(&self) -> Self::ExtractedAsset { @@ -556,7 +596,7 @@ impl RenderAsset for Image { /// Converts the extracted image into a [`GpuImage`]. 
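A short, hedged sketch of the narrowed `Image::convert` surface (the `Bgra8UnormSrgb` path is removed above): the method returns `None` for unsupported target formats, so callers should treat the conversion as fallible. `to_rgba8_srgb` is an illustrative helper, not part of this change.

use bevy_render::texture::Image;
use wgpu::TextureFormat;

// Hypothetical helper: normalize an image to RGBA8 sRGB where possible,
// keeping the original data if the conversion is unsupported.
fn to_rgba8_srgb(image: Image) -> Image {
    match image.convert(TextureFormat::Rgba8UnormSrgb) {
        Some(converted) => converted,
        None => image,
    }
}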
fn prepare_asset( image: Self::ExtractedAsset, - (render_device, render_queue): &mut SystemParamItem, + (render_device, render_queue, default_sampler): &mut SystemParamItem, ) -> Result> { let texture = if image.texture_descriptor.mip_level_count > 1 || image.is_compressed() { render_device.create_texture_with_data( @@ -599,7 +639,11 @@ impl RenderAsset for Image { image.texture_descriptor.size.width as f32, image.texture_descriptor.size.height as f32, ); - let sampler = render_device.create_sampler(&image.sampler_descriptor); + let sampler = match image.sampler_descriptor { + ImageSampler::Default => (***default_sampler).clone(), + ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + }; + Ok(GpuImage { texture, texture_view, @@ -638,62 +682,58 @@ impl CompressedImageFormats { pub fn supports(&self, format: TextureFormat) -> bool { match format { - TextureFormat::Bc1RgbaUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc1RgbaUnormSrgb => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc2RgbaUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc2RgbaUnormSrgb => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc3RgbaUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc3RgbaUnormSrgb => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc4RUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc4RSnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc5RgUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc5RgSnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc6hRgbUfloat => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc6hRgbSfloat => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc7RgbaUnorm => self.contains(CompressedImageFormats::BC), - TextureFormat::Bc7RgbaUnormSrgb => self.contains(CompressedImageFormats::BC), - TextureFormat::Etc2Rgb8Unorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Etc2Rgb8UnormSrgb => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Etc2Rgb8A1Unorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Etc2Rgb8A1UnormSrgb => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Etc2Rgba8Unorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Etc2Rgba8UnormSrgb => self.contains(CompressedImageFormats::ETC2), - TextureFormat::EacR11Unorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::EacR11Snorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::EacRg11Unorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::EacRg11Snorm => self.contains(CompressedImageFormats::ETC2), - TextureFormat::Astc4x4RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc4x4RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc5x4RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc5x4RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc5x5RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc5x5RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc6x5RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc6x5RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc6x6RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), 
- TextureFormat::Astc6x6RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x5RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x5RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x6RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x6RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x5RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x5RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x6RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x6RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x8RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc8x8RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x8RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x8RgbaUnormSrgb => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x10RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc10x10RgbaUnormSrgb => { - self.contains(CompressedImageFormats::ASTC_LDR) - } - TextureFormat::Astc12x10RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc12x10RgbaUnormSrgb => { - self.contains(CompressedImageFormats::ASTC_LDR) - } - TextureFormat::Astc12x12RgbaUnorm => self.contains(CompressedImageFormats::ASTC_LDR), - TextureFormat::Astc12x12RgbaUnormSrgb => { + TextureFormat::Bc1RgbaUnorm + | TextureFormat::Bc1RgbaUnormSrgb + | TextureFormat::Bc2RgbaUnorm + | TextureFormat::Bc2RgbaUnormSrgb + | TextureFormat::Bc3RgbaUnorm + | TextureFormat::Bc3RgbaUnormSrgb + | TextureFormat::Bc4RUnorm + | TextureFormat::Bc4RSnorm + | TextureFormat::Bc5RgUnorm + | TextureFormat::Bc5RgSnorm + | TextureFormat::Bc6hRgbUfloat + | TextureFormat::Bc6hRgbSfloat + | TextureFormat::Bc7RgbaUnorm + | TextureFormat::Bc7RgbaUnormSrgb => self.contains(CompressedImageFormats::BC), + TextureFormat::Etc2Rgb8Unorm + | TextureFormat::Etc2Rgb8UnormSrgb + | TextureFormat::Etc2Rgb8A1Unorm + | TextureFormat::Etc2Rgb8A1UnormSrgb + | TextureFormat::Etc2Rgba8Unorm + | TextureFormat::Etc2Rgba8UnormSrgb + | TextureFormat::EacR11Unorm + | TextureFormat::EacR11Snorm + | TextureFormat::EacRg11Unorm + | TextureFormat::EacRg11Snorm => self.contains(CompressedImageFormats::ETC2), + TextureFormat::Astc4x4RgbaUnorm + | TextureFormat::Astc4x4RgbaUnormSrgb + | TextureFormat::Astc5x4RgbaUnorm + | TextureFormat::Astc5x4RgbaUnormSrgb + | TextureFormat::Astc5x5RgbaUnorm + | TextureFormat::Astc5x5RgbaUnormSrgb + | TextureFormat::Astc6x5RgbaUnorm + | TextureFormat::Astc6x5RgbaUnormSrgb + | TextureFormat::Astc6x6RgbaUnorm + | TextureFormat::Astc6x6RgbaUnormSrgb + | TextureFormat::Astc8x5RgbaUnorm + | TextureFormat::Astc8x5RgbaUnormSrgb + | TextureFormat::Astc8x6RgbaUnorm + | TextureFormat::Astc8x6RgbaUnormSrgb + | TextureFormat::Astc10x5RgbaUnorm + | TextureFormat::Astc10x5RgbaUnormSrgb + | TextureFormat::Astc10x6RgbaUnorm + | TextureFormat::Astc10x6RgbaUnormSrgb + | TextureFormat::Astc8x8RgbaUnorm + | TextureFormat::Astc8x8RgbaUnormSrgb + | TextureFormat::Astc10x8RgbaUnorm + | TextureFormat::Astc10x8RgbaUnormSrgb + | TextureFormat::Astc10x10RgbaUnorm + | TextureFormat::Astc10x10RgbaUnormSrgb + | TextureFormat::Astc12x10RgbaUnorm + | TextureFormat::Astc12x10RgbaUnormSrgb + | 
TextureFormat::Astc12x12RgbaUnorm + | TextureFormat::Astc12x12RgbaUnormSrgb => { self.contains(CompressedImageFormats::ASTC_LDR) } _ => true, diff --git a/crates/bevy_render/src/texture/image_texture_conversion.rs b/crates/bevy_render/src/texture/image_texture_conversion.rs index 1c7e897555001..44611faafcddb 100644 --- a/crates/bevy_render/src/texture/image_texture_conversion.rs +++ b/crates/bevy_render/src/texture/image_texture_conversion.rs @@ -60,30 +60,6 @@ pub(crate) fn image_to_texture(dyn_img: DynamicImage, is_srgb: bool) -> Image { data = i.into_raw(); } - DynamicImage::ImageBgr8(i) => { - let i = DynamicImage::ImageBgr8(i).into_bgra8(); - - width = i.width(); - height = i.height(); - format = if is_srgb { - TextureFormat::Bgra8UnormSrgb - } else { - TextureFormat::Bgra8Unorm - }; - - data = i.into_raw(); - } - DynamicImage::ImageBgra8(i) => { - width = i.width(); - height = i.height(); - format = if is_srgb { - TextureFormat::Bgra8UnormSrgb - } else { - TextureFormat::Bgra8Unorm - }; - - data = i.into_raw(); - } DynamicImage::ImageLuma16(i) => { width = i.width(); height = i.height(); @@ -135,6 +111,48 @@ pub(crate) fn image_to_texture(dyn_img: DynamicImage, is_srgb: bool) -> Image { data = cast_slice(&raw_data).to_owned(); } + DynamicImage::ImageRgb32F(image) => { + width = image.width(); + height = image.height(); + format = TextureFormat::Rgba32Float; + + let mut local_data = + Vec::with_capacity(width as usize * height as usize * format.pixel_size()); + + for pixel in image.into_raw().chunks_exact(3) { + // TODO: use the array_chunks method once stabilised + // https://github.com/rust-lang/rust/issues/74985 + let r = pixel[0]; + let g = pixel[1]; + let b = pixel[2]; + let a = u16::max_value(); + + local_data.extend_from_slice(&r.to_ne_bytes()); + local_data.extend_from_slice(&g.to_ne_bytes()); + local_data.extend_from_slice(&b.to_ne_bytes()); + local_data.extend_from_slice(&a.to_ne_bytes()); + } + + data = local_data; + } + DynamicImage::ImageRgba32F(image) => { + width = image.width(); + height = image.height(); + format = TextureFormat::Rgba32Float; + + let raw_data = image.into_raw(); + + data = cast_slice(&raw_data).to_owned(); + } + // DynamicImage is now non exhaustive, catch future variants and convert them + _ => { + let image = dyn_img.into_rgba8(); + width = image.width(); + height = image.height(); + format = TextureFormat::Rgba8UnormSrgb; + + data = image.into_raw(); + } } Image::new( @@ -171,12 +189,6 @@ pub(crate) fn texture_to_image(texture: &Image) -> Option { texture.data.clone(), ) .map(DynamicImage::ImageRgba8), - TextureFormat::Bgra8UnormSrgb => ImageBuffer::from_raw( - texture.texture_descriptor.size.width, - texture.texture_descriptor.size.height, - texture.data.clone(), - ) - .map(DynamicImage::ImageBgra8), _ => None, } } diff --git a/crates/bevy_render/src/texture/ktx2.rs b/crates/bevy_render/src/texture/ktx2.rs index 0bc9eb71c606a..7a4fb62682ba2 100644 --- a/crates/bevy_render/src/texture/ktx2.rs +++ b/crates/bevy_render/src/texture/ktx2.rs @@ -85,7 +85,7 @@ pub fn ktx2_buffer_to_image( TranscodeFormat::Rgb8 => { let (mut original_width, mut original_height) = (width, height); - for level_data in levels.iter() { + for level_data in &levels { let n_pixels = (original_width * original_height) as usize; let mut rgba = vec![255u8; n_pixels * 4]; @@ -206,7 +206,7 @@ pub fn get_transcoded_formats( is_srgb: bool, ) -> (TranscoderBlockFormat, TextureFormat) { match data_format { - DataFormat::R8 => { + DataFormat::Rrr => { if 
supported_compressed_formats.contains(CompressedImageFormats::BC) { (TranscoderBlockFormat::BC4, TextureFormat::Bc4RUnorm) } else if supported_compressed_formats.contains(CompressedImageFormats::ETC2) { @@ -218,7 +218,7 @@ pub fn get_transcoded_formats( (TranscoderBlockFormat::RGBA32, TextureFormat::R8Unorm) } } - DataFormat::Rg8 => { + DataFormat::Rrrg | DataFormat::Rg => { if supported_compressed_formats.contains(CompressedImageFormats::BC) { (TranscoderBlockFormat::BC5, TextureFormat::Bc5RgUnorm) } else if supported_compressed_formats.contains(CompressedImageFormats::ETC2) { @@ -232,7 +232,7 @@ pub fn get_transcoded_formats( } // NOTE: Rgba16Float should be transcoded to BC6H/ASTC_HDR. Neither are supported by // basis-universal, nor is ASTC_HDR supported by wgpu - DataFormat::Rgb8 | DataFormat::Rgba8 | DataFormat::Rgba16Float => { + DataFormat::Rgb | DataFormat::Rgba => { // NOTE: UASTC can be losslessly transcoded to ASTC4x4 and ASTC uses the same // space as BC7 (128-bits per 4x4 texel block) so prefer ASTC over BC for // transcoding speed and quality. @@ -766,25 +766,19 @@ pub fn ktx2_dfd_to_texture_format( } } } - Some(ColorModel::YUVSDA) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::YIQSDA) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::LabSDA) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::CMYKA) => { + Some(ColorModel::YUVSDA) + | Some(ColorModel::YIQSDA) + | Some(ColorModel::LabSDA) + | Some(ColorModel::CMYKA) + | Some(ColorModel::HSVAAng) + | Some(ColorModel::HSLAAng) + | Some(ColorModel::HSVAHex) + | Some(ColorModel::HSLAHex) + | Some(ColorModel::YCgCoA) + | Some(ColorModel::YcCbcCrc) + | Some(ColorModel::ICtCp) + | Some(ColorModel::CIEXYZ) + | Some(ColorModel::CIEXYY) => { return Err(TextureError::UnsupportedTextureFormat(format!( "{:?}", data_format_descriptor.color_model @@ -897,60 +891,6 @@ pub fn ktx2_dfd_to_texture_format( } } } - Some(ColorModel::HSVAAng) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::HSLAAng) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::HSVAHex) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::HSLAHex) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::YCgCoA) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::YcCbcCrc) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::ICtCp) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::CIEXYZ) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } - Some(ColorModel::CIEXYY) => { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - data_format_descriptor.color_model - ))); - } 
Some(ColorModel::BC1A) => { if is_srgb { TextureFormat::Bc1RgbaUnormSrgb @@ -1247,18 +1187,18 @@ pub fn ktx2_dfd_to_texture_format( } Some(ColorModel::UASTC) => { return Err(TextureError::FormatRequiresTranscodingError( - TranscodeFormat::Uastc(match sample_information.len() { - 1 => DataFormat::R8, - 2 => DataFormat::Rg8, - 3 => DataFormat::Rgb8, - 4 => { - if sample_information[0].bit_length == 8 { - DataFormat::Rgba8 - } else { - DataFormat::Rgba16Float - } + TranscodeFormat::Uastc(match sample_information[0].channel_type { + 0 => DataFormat::Rgb, + 3 => DataFormat::Rgba, + 4 => DataFormat::Rrr, + 5 => DataFormat::Rrrg, + 6 => DataFormat::Rg, + channel_type => { + return Err(TextureError::UnsupportedTextureFormat(format!( + "Invalid KTX2 UASTC channel type: {}", + channel_type + ))) } - _ => DataFormat::Rgba8, }), )); } @@ -1281,53 +1221,31 @@ pub fn ktx2_format_to_texture_format( is_srgb: bool, ) -> Result { Ok(match ktx2_format { - ktx2::Format::R8_UNORM => { + ktx2::Format::R8_UNORM | ktx2::Format::R8_SRGB => { if is_srgb { return Err(TextureError::UnsupportedTextureFormat(format!( "{:?}", ktx2_format ))); - } else { - TextureFormat::R8Unorm } + TextureFormat::R8Unorm } ktx2::Format::R8_SNORM => TextureFormat::R8Snorm, ktx2::Format::R8_UINT => TextureFormat::R8Uint, ktx2::Format::R8_SINT => TextureFormat::R8Sint, - ktx2::Format::R8_SRGB => { + ktx2::Format::R8G8_UNORM | ktx2::Format::R8G8_SRGB => { if is_srgb { return Err(TextureError::UnsupportedTextureFormat(format!( "{:?}", ktx2_format ))); - } else { - TextureFormat::R8Unorm - } - } - ktx2::Format::R8G8_UNORM => { - if is_srgb { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - ktx2_format - ))); - } else { - TextureFormat::Rg8Unorm } + TextureFormat::Rg8Unorm } ktx2::Format::R8G8_SNORM => TextureFormat::Rg8Snorm, ktx2::Format::R8G8_UINT => TextureFormat::Rg8Uint, ktx2::Format::R8G8_SINT => TextureFormat::Rg8Sint, - ktx2::Format::R8G8_SRGB => { - if is_srgb { - return Err(TextureError::UnsupportedTextureFormat(format!( - "{:?}", - ktx2_format - ))); - } else { - TextureFormat::Rg8Unorm - } - } - ktx2::Format::R8G8B8A8_UNORM => { + ktx2::Format::R8G8B8A8_UNORM | ktx2::Format::R8G8B8A8_SRGB => { if is_srgb { TextureFormat::Rgba8UnormSrgb } else { @@ -1337,21 +1255,7 @@ pub fn ktx2_format_to_texture_format( ktx2::Format::R8G8B8A8_SNORM => TextureFormat::Rgba8Snorm, ktx2::Format::R8G8B8A8_UINT => TextureFormat::Rgba8Uint, ktx2::Format::R8G8B8A8_SINT => TextureFormat::Rgba8Sint, - ktx2::Format::R8G8B8A8_SRGB => { - if is_srgb { - TextureFormat::Rgba8UnormSrgb - } else { - TextureFormat::Rgba8Unorm - } - } - ktx2::Format::B8G8R8A8_UNORM => { - if is_srgb { - TextureFormat::Bgra8UnormSrgb - } else { - TextureFormat::Bgra8Unorm - } - } - ktx2::Format::B8G8R8A8_SRGB => { + ktx2::Format::B8G8R8A8_UNORM | ktx2::Format::B8G8R8A8_SRGB => { if is_srgb { TextureFormat::Bgra8UnormSrgb } else { @@ -1395,56 +1299,24 @@ pub fn ktx2_format_to_texture_format( ktx2::Format::D24_UNORM_S8_UINT => TextureFormat::Depth24PlusStencil8, - ktx2::Format::BC1_RGB_UNORM_BLOCK => { + ktx2::Format::BC1_RGB_UNORM_BLOCK + | ktx2::Format::BC1_RGB_SRGB_BLOCK + | ktx2::Format::BC1_RGBA_UNORM_BLOCK + | ktx2::Format::BC1_RGBA_SRGB_BLOCK => { if is_srgb { TextureFormat::Bc1RgbaUnormSrgb } else { TextureFormat::Bc1RgbaUnorm } } - ktx2::Format::BC1_RGB_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Bc1RgbaUnormSrgb - } else { - TextureFormat::Bc1RgbaUnorm - } - } - ktx2::Format::BC1_RGBA_UNORM_BLOCK => { - if is_srgb { - 
TextureFormat::Bc1RgbaUnormSrgb - } else { - TextureFormat::Bc1RgbaUnorm - } - } - ktx2::Format::BC1_RGBA_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Bc1RgbaUnormSrgb - } else { - TextureFormat::Bc1RgbaUnorm - } - } - ktx2::Format::BC2_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Bc2RgbaUnormSrgb - } else { - TextureFormat::Bc2RgbaUnorm - } - } - ktx2::Format::BC2_SRGB_BLOCK => { + ktx2::Format::BC2_UNORM_BLOCK | ktx2::Format::BC2_SRGB_BLOCK => { if is_srgb { TextureFormat::Bc2RgbaUnormSrgb } else { TextureFormat::Bc2RgbaUnorm } } - ktx2::Format::BC3_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Bc3RgbaUnormSrgb - } else { - TextureFormat::Bc3RgbaUnorm - } - } - ktx2::Format::BC3_SRGB_BLOCK => { + ktx2::Format::BC3_UNORM_BLOCK | ktx2::Format::BC3_SRGB_BLOCK => { if is_srgb { TextureFormat::Bc3RgbaUnormSrgb } else { @@ -1457,56 +1329,28 @@ pub fn ktx2_format_to_texture_format( ktx2::Format::BC5_SNORM_BLOCK => TextureFormat::Bc5RgSnorm, ktx2::Format::BC6H_UFLOAT_BLOCK => TextureFormat::Bc6hRgbUfloat, ktx2::Format::BC6H_SFLOAT_BLOCK => TextureFormat::Bc6hRgbSfloat, - ktx2::Format::BC7_UNORM_BLOCK => { + ktx2::Format::BC7_UNORM_BLOCK | ktx2::Format::BC7_SRGB_BLOCK => { if is_srgb { TextureFormat::Bc7RgbaUnormSrgb } else { TextureFormat::Bc7RgbaUnorm } } - ktx2::Format::BC7_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Bc7RgbaUnormSrgb - } else { - TextureFormat::Bc7RgbaUnorm - } - } - ktx2::Format::ETC2_R8G8B8_UNORM_BLOCK => { + ktx2::Format::ETC2_R8G8B8_UNORM_BLOCK | ktx2::Format::ETC2_R8G8B8_SRGB_BLOCK => { if is_srgb { TextureFormat::Etc2Rgb8UnormSrgb } else { TextureFormat::Etc2Rgb8Unorm } } - ktx2::Format::ETC2_R8G8B8_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Etc2Rgb8UnormSrgb - } else { - TextureFormat::Etc2Rgb8Unorm - } - } - ktx2::Format::ETC2_R8G8B8A1_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Etc2Rgb8A1UnormSrgb - } else { - TextureFormat::Etc2Rgb8A1Unorm - } - } - ktx2::Format::ETC2_R8G8B8A1_SRGB_BLOCK => { + ktx2::Format::ETC2_R8G8B8A1_UNORM_BLOCK | ktx2::Format::ETC2_R8G8B8A1_SRGB_BLOCK => { if is_srgb { TextureFormat::Etc2Rgb8A1UnormSrgb } else { TextureFormat::Etc2Rgb8A1Unorm } } - ktx2::Format::ETC2_R8G8B8A8_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Etc2Rgba8UnormSrgb - } else { - TextureFormat::Etc2Rgba8Unorm - } - } - ktx2::Format::ETC2_R8G8B8A8_SRGB_BLOCK => { + ktx2::Format::ETC2_R8G8B8A8_UNORM_BLOCK | ktx2::Format::ETC2_R8G8B8A8_SRGB_BLOCK => { if is_srgb { TextureFormat::Etc2Rgba8UnormSrgb } else { @@ -1517,196 +1361,98 @@ pub fn ktx2_format_to_texture_format( ktx2::Format::EAC_R11_SNORM_BLOCK => TextureFormat::EacR11Snorm, ktx2::Format::EAC_R11G11_UNORM_BLOCK => TextureFormat::EacRg11Unorm, ktx2::Format::EAC_R11G11_SNORM_BLOCK => TextureFormat::EacRg11Snorm, - ktx2::Format::ASTC_4x4_UNORM_BLOCK => { + ktx2::Format::ASTC_4x4_UNORM_BLOCK | ktx2::Format::ASTC_4x4_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc4x4RgbaUnormSrgb } else { TextureFormat::Astc4x4RgbaUnorm } } - ktx2::Format::ASTC_4x4_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc4x4RgbaUnormSrgb - } else { - TextureFormat::Astc4x4RgbaUnorm - } - } - ktx2::Format::ASTC_5x4_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc5x4RgbaUnormSrgb - } else { - TextureFormat::Astc5x4RgbaUnorm - } - } - ktx2::Format::ASTC_5x4_SRGB_BLOCK => { + ktx2::Format::ASTC_5x4_UNORM_BLOCK | ktx2::Format::ASTC_5x4_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc5x4RgbaUnormSrgb } else { TextureFormat::Astc5x4RgbaUnorm } } - ktx2::Format::ASTC_5x5_UNORM_BLOCK => { + 
ktx2::Format::ASTC_5x5_UNORM_BLOCK | ktx2::Format::ASTC_5x5_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc5x5RgbaUnormSrgb } else { TextureFormat::Astc5x5RgbaUnorm } } - ktx2::Format::ASTC_5x5_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc5x5RgbaUnormSrgb - } else { - TextureFormat::Astc5x5RgbaUnorm - } - } - ktx2::Format::ASTC_6x5_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc6x5RgbaUnormSrgb - } else { - TextureFormat::Astc6x5RgbaUnorm - } - } - ktx2::Format::ASTC_6x5_SRGB_BLOCK => { + ktx2::Format::ASTC_6x5_UNORM_BLOCK | ktx2::Format::ASTC_6x5_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc6x5RgbaUnormSrgb } else { TextureFormat::Astc6x5RgbaUnorm } } - ktx2::Format::ASTC_6x6_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc6x6RgbaUnormSrgb - } else { - TextureFormat::Astc6x6RgbaUnorm - } - } - ktx2::Format::ASTC_6x6_SRGB_BLOCK => { + ktx2::Format::ASTC_6x6_UNORM_BLOCK | ktx2::Format::ASTC_6x6_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc6x6RgbaUnormSrgb } else { TextureFormat::Astc6x6RgbaUnorm } } - ktx2::Format::ASTC_8x5_UNORM_BLOCK => { + ktx2::Format::ASTC_8x5_UNORM_BLOCK | ktx2::Format::ASTC_8x5_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc8x5RgbaUnormSrgb } else { TextureFormat::Astc8x5RgbaUnorm } } - ktx2::Format::ASTC_8x5_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc8x5RgbaUnormSrgb - } else { - TextureFormat::Astc8x5RgbaUnorm - } - } - ktx2::Format::ASTC_8x6_UNORM_BLOCK => { + ktx2::Format::ASTC_8x6_UNORM_BLOCK | ktx2::Format::ASTC_8x6_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc8x6RgbaUnormSrgb } else { TextureFormat::Astc8x6RgbaUnorm } } - ktx2::Format::ASTC_8x6_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc8x6RgbaUnormSrgb - } else { - TextureFormat::Astc8x6RgbaUnorm - } - } - ktx2::Format::ASTC_8x8_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc8x8RgbaUnormSrgb - } else { - TextureFormat::Astc8x8RgbaUnorm - } - } - ktx2::Format::ASTC_8x8_SRGB_BLOCK => { + ktx2::Format::ASTC_8x8_UNORM_BLOCK | ktx2::Format::ASTC_8x8_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc8x8RgbaUnormSrgb } else { TextureFormat::Astc8x8RgbaUnorm } } - ktx2::Format::ASTC_10x5_UNORM_BLOCK => { + ktx2::Format::ASTC_10x5_UNORM_BLOCK | ktx2::Format::ASTC_10x5_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc10x5RgbaUnormSrgb } else { TextureFormat::Astc10x5RgbaUnorm } } - ktx2::Format::ASTC_10x5_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc10x5RgbaUnormSrgb - } else { - TextureFormat::Astc10x5RgbaUnorm - } - } - ktx2::Format::ASTC_10x6_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc10x6RgbaUnormSrgb - } else { - TextureFormat::Astc10x6RgbaUnorm - } - } - ktx2::Format::ASTC_10x6_SRGB_BLOCK => { + ktx2::Format::ASTC_10x6_UNORM_BLOCK | ktx2::Format::ASTC_10x6_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc10x6RgbaUnormSrgb } else { TextureFormat::Astc10x6RgbaUnorm } } - ktx2::Format::ASTC_10x8_UNORM_BLOCK => { + ktx2::Format::ASTC_10x8_UNORM_BLOCK | ktx2::Format::ASTC_10x8_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc10x8RgbaUnormSrgb } else { TextureFormat::Astc10x8RgbaUnorm } } - ktx2::Format::ASTC_10x8_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc10x8RgbaUnormSrgb - } else { - TextureFormat::Astc10x8RgbaUnorm - } - } - ktx2::Format::ASTC_10x10_UNORM_BLOCK => { + ktx2::Format::ASTC_10x10_UNORM_BLOCK | ktx2::Format::ASTC_10x10_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc10x10RgbaUnormSrgb } else { TextureFormat::Astc10x10RgbaUnorm } } - ktx2::Format::ASTC_10x10_SRGB_BLOCK => { - if is_srgb { - TextureFormat::Astc10x10RgbaUnormSrgb - } 
else { - TextureFormat::Astc10x10RgbaUnorm - } - } - ktx2::Format::ASTC_12x10_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc12x10RgbaUnormSrgb - } else { - TextureFormat::Astc12x10RgbaUnorm - } - } - ktx2::Format::ASTC_12x10_SRGB_BLOCK => { + ktx2::Format::ASTC_12x10_UNORM_BLOCK | ktx2::Format::ASTC_12x10_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc12x10RgbaUnormSrgb } else { TextureFormat::Astc12x10RgbaUnorm } } - ktx2::Format::ASTC_12x12_UNORM_BLOCK => { - if is_srgb { - TextureFormat::Astc12x12RgbaUnormSrgb - } else { - TextureFormat::Astc12x12RgbaUnorm - } - } - ktx2::Format::ASTC_12x12_SRGB_BLOCK => { + ktx2::Format::ASTC_12x12_UNORM_BLOCK | ktx2::Format::ASTC_12x12_SRGB_BLOCK => { if is_srgb { TextureFormat::Astc12x12RgbaUnormSrgb } else { diff --git a/crates/bevy_render/src/texture/mod.rs b/crates/bevy_render/src/texture/mod.rs index 851eac1371b9e..1ca625d1a2cfc 100644 --- a/crates/bevy_render/src/texture/mod.rs +++ b/crates/bevy_render/src/texture/mod.rs @@ -26,6 +26,7 @@ pub use texture_cache::*; use crate::{ render_asset::{PrepareAssetLabel, RenderAssetPlugin}, + renderer::RenderDevice, RenderApp, RenderStage, }; use bevy_app::{App, Plugin}; @@ -63,14 +64,52 @@ impl Plugin for ImagePlugin { .resource_mut::>() .set_untracked(DEFAULT_IMAGE_HANDLE, Image::default()); + let default_sampler = app + .world + .get_resource_or_insert_with(ImageSettings::default) + .default_sampler + .clone(); if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { + let default_sampler = { + let device = render_app.world.resource::(); + device.create_sampler(&default_sampler) + }; render_app + .insert_resource(DefaultImageSampler(default_sampler)) .init_resource::() .add_system_to_stage(RenderStage::Cleanup, update_texture_cache_system); } } } +/// [`ImagePlugin`] settings. +pub struct ImageSettings { + /// The default image sampler to use when [`ImageSampler`] is set to `Default`. + pub default_sampler: wgpu::SamplerDescriptor<'static>, +} + +impl Default for ImageSettings { + fn default() -> Self { + ImageSettings::default_linear() + } +} + +impl ImageSettings { + /// Creates image settings with default linear sampling. + pub fn default_linear() -> ImageSettings { + ImageSettings { + default_sampler: ImageSampler::linear_descriptor(), + } + } + + /// Creates image settings with default nearest sampling. 
+ pub fn default_nearest() -> ImageSettings { + ImageSettings { + default_sampler: ImageSampler::nearest_descriptor(), + } + } +} + pub trait BevyDefault { fn bevy_default() -> Self; } diff --git a/crates/bevy_render/src/view/mod.rs b/crates/bevy_render/src/view/mod.rs index f5e547b0f8716..2638e94d4581a 100644 --- a/crates/bevy_render/src/view/mod.rs +++ b/crates/bevy_render/src/view/mod.rs @@ -10,9 +10,10 @@ pub use window::*; use crate::{ camera::ExtractedCamera, + extract_resource::{ExtractResource, ExtractResourcePlugin}, prelude::Image, render_asset::RenderAssets, - render_resource::{std140::AsStd140, DynamicUniformVec, Texture, TextureView}, + render_resource::{DynamicUniformBuffer, ShaderType, Texture, TextureView}, renderer::{RenderDevice, RenderQueue}, texture::{BevyDefault, TextureCache}, RenderApp, RenderStage, @@ -27,12 +28,14 @@ pub struct ViewPlugin; impl Plugin for ViewPlugin { fn build(&self, app: &mut App) { - app.init_resource::().add_plugin(VisibilityPlugin); + app.init_resource::() + // NOTE: windows.is_changed() handles cases where a window was resized + .add_plugin(ExtractResourcePlugin::::default()) + .add_plugin(VisibilityPlugin); if let Ok(render_app) = app.get_sub_app_mut(RenderApp) { render_app .init_resource::() - .add_system_to_stage(RenderStage::Extract, extract_msaa) .add_system_to_stage(RenderStage::Prepare, prepare_view_uniforms) .add_system_to_stage( RenderStage::Prepare, @@ -42,7 +45,7 @@ impl Plugin for ViewPlugin { } } -#[derive(Clone)] +#[derive(Clone, ExtractResource)] /// Configuration resource for [Multi-Sample Anti-Aliasing](https://en.wikipedia.org/wiki/Multisample_anti-aliasing). /// /// # Example @@ -70,11 +73,6 @@ impl Default for Msaa { } } -pub fn extract_msaa(mut commands: Commands, msaa: Res) { - // NOTE: windows.is_changed() handles cases where a window was resized - commands.insert_resource(msaa.clone()); -} - #[derive(Component)] pub struct ExtractedView { pub projection: Mat4, @@ -83,7 +81,7 @@ pub struct ExtractedView { pub height: u32, } -#[derive(Clone, AsStd140)] +#[derive(Clone, ShaderType)] pub struct ViewUniform { view_proj: Mat4, view: Mat4, @@ -96,7 +94,7 @@ pub struct ViewUniform { #[derive(Default)] pub struct ViewUniforms { - pub uniforms: DynamicUniformVec, + pub uniforms: DynamicUniformBuffer, } #[derive(Component)] @@ -113,11 +111,7 @@ pub struct ViewTarget { impl ViewTarget { pub fn get_color_attachment(&self, ops: Operations) -> RenderPassColorAttachment { RenderPassColorAttachment { - view: if let Some(sampled_target) = &self.sampled_target { - sampled_target - } else { - &self.view - }, + view: self.sampled_target.as_ref().unwrap_or(&self.view), resolve_target: if self.sampled_target.is_some() { Some(&self.view) } else { @@ -178,7 +172,7 @@ fn prepare_view_targets( ) { let mut sampled_textures = HashMap::default(); for (entity, camera) in cameras.iter() { - if let Some(size) = camera.physical_size { + if let Some(target_size) = camera.physical_target_size { if let Some(texture_view) = camera.target.get_texture_view(&windows, &images) { let sampled_target = if msaa.samples > 1 { let sampled_texture = sampled_textures @@ -189,8 +183,8 @@ fn prepare_view_targets( TextureDescriptor { label: Some("sampled_color_attachment_texture"), size: Extent3d { - width: size.x, - height: size.y, + width: target_size.x, + height: target_size.y, depth_or_array_layers: 1, }, mip_level_count: 1, diff --git a/crates/bevy_render/src/view/visibility/mod.rs b/crates/bevy_render/src/view/visibility/mod.rs index 478c635572dc9..795a860bd0e49 
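Note on the `ImageSettings` resource added above: it feeds the sampler that `ImagePlugin` turns into the render world's `DefaultImageSampler`, so it has to be inserted before the plugin builds. A minimal usage sketch (the app setup itself is illustrative, not part of this patch):

```rust
use bevy::{prelude::*, render::texture::ImageSettings};

fn main() {
    App::new()
        // Must come before DefaultPlugins: ImagePlugin reads this resource while it
        // builds and creates the default sampler for images left on ImageSampler::Default.
        .insert_resource(ImageSettings::default_nearest())
        .add_plugins(DefaultPlugins)
        .run();
}
```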
100644 --- a/crates/bevy_render/src/view/visibility/mod.rs +++ b/crates/bevy_render/src/view/visibility/mod.rs @@ -10,9 +10,11 @@ use bevy_reflect::std_traits::ReflectDefault; use bevy_reflect::Reflect; use bevy_transform::components::GlobalTransform; use bevy_transform::TransformSystem; +use std::cell::Cell; +use thread_local::ThreadLocal; use crate::{ - camera::{Camera, CameraProjection, OrthographicProjection, PerspectiveProjection}, + camera::{Camera, CameraProjection, OrthographicProjection, PerspectiveProjection, Projection}, mesh::Mesh, primitives::{Aabb, Frustum, Sphere}, }; @@ -73,6 +75,7 @@ pub enum VisibilitySystems { CalculateBounds, UpdateOrthographicFrusta, UpdatePerspectiveFrusta, + UpdateProjectionFrusta, CheckVisibility, } @@ -98,6 +101,12 @@ impl Plugin for VisibilityPlugin { .label(UpdatePerspectiveFrusta) .after(TransformSystem::TransformPropagate), ) + .add_system_to_stage( + CoreStage::PostUpdate, + update_frusta:: + .label(UpdateProjectionFrusta) + .after(TransformSystem::TransformPropagate), + ) .add_system_to_stage( CoreStage::PostUpdate, check_visibility @@ -105,6 +114,7 @@ impl Plugin for VisibilityPlugin { .after(CalculateBounds) .after(UpdateOrthographicFrusta) .after(UpdatePerspectiveFrusta) + .after(UpdateProjectionFrusta) .after(TransformSystem::TransformPropagate), ); } @@ -140,6 +150,7 @@ pub fn update_frusta( } pub fn check_visibility( + mut thread_queues: Local>>>, mut view_query: Query<(&mut VisibleEntities, &Frustum, Option<&RenderLayers>), With>, mut visible_entity_query: ParamSet<( Query<&mut ComputedVisibility>, @@ -160,52 +171,57 @@ pub fn check_visibility( } for (mut visible_entities, frustum, maybe_view_mask) in view_query.iter_mut() { - visible_entities.entities.clear(); let view_mask = maybe_view_mask.copied().unwrap_or_default(); + visible_entities.entities.clear(); + visible_entity_query.p1().par_for_each_mut( + 1024, + |( + entity, + visibility, + mut computed_visibility, + maybe_entity_mask, + maybe_aabb, + maybe_no_frustum_culling, + maybe_transform, + )| { + if !visibility.is_visible { + return; + } - for ( - entity, - visibility, - mut computed_visibility, - maybe_entity_mask, - maybe_aabb, - maybe_no_frustum_culling, - maybe_transform, - ) in visible_entity_query.p1().iter_mut() - { - if !visibility.is_visible { - continue; - } - - let entity_mask = maybe_entity_mask.copied().unwrap_or_default(); - if !view_mask.intersects(&entity_mask) { - continue; - } - - // If we have an aabb and transform, do frustum culling - if let (Some(model_aabb), None, Some(transform)) = - (maybe_aabb, maybe_no_frustum_culling, maybe_transform) - { - let model = transform.compute_matrix(); - let model_sphere = Sphere { - center: model.transform_point3a(model_aabb.center), - radius: (Vec3A::from(transform.scale) * model_aabb.half_extents).length(), - }; - // Do quick sphere-based frustum culling - if !frustum.intersects_sphere(&model_sphere, false) { - continue; + let entity_mask = maybe_entity_mask.copied().unwrap_or_default(); + if !view_mask.intersects(&entity_mask) { + return; } - // If we have an aabb, do aabb-based frustum culling - if !frustum.intersects_obb(model_aabb, &model, false) { - continue; + + // If we have an aabb and transform, do frustum culling + if let (Some(model_aabb), None, Some(transform)) = + (maybe_aabb, maybe_no_frustum_culling, maybe_transform) + { + let model = transform.compute_matrix(); + let model_sphere = Sphere { + center: model.transform_point3a(model_aabb.center), + radius: (Vec3A::from(transform.scale) * 
model_aabb.half_extents).length(), + }; + // Do quick sphere-based frustum culling + if !frustum.intersects_sphere(&model_sphere, false) { + return; + } + // If we have an aabb, do aabb-based frustum culling + if !frustum.intersects_obb(model_aabb, &model, false) { + return; + } } - } - computed_visibility.is_visible = true; - visible_entities.entities.push(entity); - } + computed_visibility.is_visible = true; + let cell = thread_queues.get_or_default(); + let mut queue = cell.take(); + queue.push(entity); + cell.set(queue); + }, + ); - // TODO: check for big changes in visible entities len() vs capacity() (ex: 2x) and resize - // to prevent holding unneeded memory + for cell in thread_queues.iter_mut() { + visible_entities.entities.append(cell.get_mut()); + } } } diff --git a/crates/bevy_scene/Cargo.toml b/crates/bevy_scene/Cargo.toml index a72240904d096..49313b486b37b 100644 --- a/crates/bevy_scene/Cargo.toml +++ b/crates/bevy_scene/Cargo.toml @@ -12,14 +12,16 @@ keywords = ["bevy"] # bevy bevy_app = { path = "../bevy_app", version = "0.8.0-dev" } bevy_asset = { path = "../bevy_asset", version = "0.8.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.8.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev" } bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["bevy"] } bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.8.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.8.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } # other serde = { version = "1.0", features = ["derive"] } ron = "0.7.0" -uuid = { version = "0.8", features = ["v4", "serde"] } +uuid = { version = "1.1", features = ["v4", "serde"] } anyhow = "1.0.4" thiserror = "1.0" diff --git a/crates/bevy_scene/src/bundle.rs b/crates/bevy_scene/src/bundle.rs new file mode 100644 index 0000000000000..931de37556642 --- /dev/null +++ b/crates/bevy_scene/src/bundle.rs @@ -0,0 +1,74 @@ +use bevy_asset::Handle; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + bundle::Bundle, + change_detection::ResMut, + entity::Entity, + prelude::{Changed, Component, Without}, + system::{Commands, Query}, +}; +use bevy_transform::components::{GlobalTransform, Transform}; + +use crate::{DynamicScene, InstanceId, Scene, SceneSpawner}; + +/// [`InstanceId`] of a spawned scene. It can be used with the [`SceneSpawner`] to +/// interact with the spawned scene. +#[derive(Component, Deref, DerefMut)] +pub struct SceneInstance(InstanceId); + +/// A component bundle for a [`Scene`] root. +/// +/// The scene from `scene` will be spawn as a child of the entity with this component. +/// Once it's spawned, the entity will have a [`SceneInstance`] component. +#[derive(Default, Bundle)] +pub struct SceneBundle { + /// Handle to the scene to spawn + pub scene: Handle, + pub transform: Transform, + pub global_transform: GlobalTransform, +} + +/// A component bundle for a [`DynamicScene`] root. +/// +/// The dynamic scene from `scene` will be spawn as a child of the entity with this component. +/// Once it's spawned, the entity will have a [`SceneInstance`] component. +#[derive(Default, Bundle)] +pub struct DynamicSceneBundle { + /// Handle to the scene to spawn + pub scene: Handle, + pub transform: Transform, + pub global_transform: GlobalTransform, +} + +/// System that will spawn scenes from [`SceneBundle`]. 
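Stepping back to the `check_visibility` rewrite above: each worker thread pushes the entities it finds visible into its own `Cell<Vec<Entity>>` from the `thread_local` crate, and the per-thread queues are appended into `VisibleEntities` after the parallel loop. A rough standalone sketch of that collection pattern, using `rayon` in place of Bevy's `par_for_each_mut` purely for illustration (the ids and the fake culling test are made up):

```rust
use std::cell::Cell;

use rayon::prelude::*;
use thread_local::ThreadLocal;

fn collect_visible(candidates: &[u64]) -> Vec<u64> {
    // One queue per worker thread; Cell lets a thread swap its Vec in and out
    // without any locking or channel contention.
    let mut queues: ThreadLocal<Cell<Vec<u64>>> = ThreadLocal::new();

    candidates.par_iter().for_each(|&id| {
        let passes_culling = id % 2 == 0; // stand-in for the layer/frustum checks
        if passes_culling {
            let cell = queues.get_or_default();
            let mut queue = cell.take();
            queue.push(id);
            cell.set(queue);
        }
    });

    // Single-threaded drain at the end, mirroring the append into visible_entities.
    let mut visible = Vec::new();
    for cell in queues.iter_mut() {
        visible.append(cell.get_mut());
    }
    visible
}
```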
+pub fn scene_spawner( + mut commands: Commands, + mut scene_to_spawn: Query< + (Entity, &Handle, Option<&mut SceneInstance>), + (Changed>, Without>), + >, + mut dynamic_scene_to_spawn: Query< + (Entity, &Handle, Option<&mut SceneInstance>), + (Changed>, Without>), + >, + mut scene_spawner: ResMut, +) { + for (entity, scene, instance) in scene_to_spawn.iter_mut() { + let new_instance = scene_spawner.spawn_as_child(scene.clone(), entity); + if let Some(mut old_instance) = instance { + scene_spawner.despawn_instance(**old_instance); + *old_instance = SceneInstance(new_instance); + } else { + commands.entity(entity).insert(SceneInstance(new_instance)); + } + } + for (entity, dynamic_scene, instance) in dynamic_scene_to_spawn.iter_mut() { + let new_instance = scene_spawner.spawn_dynamic_as_child(dynamic_scene.clone(), entity); + if let Some(mut old_instance) = instance { + scene_spawner.despawn_instance(**old_instance); + *old_instance = SceneInstance(new_instance); + } else { + commands.entity(entity).insert(SceneInstance(new_instance)); + } + } +} diff --git a/crates/bevy_scene/src/command.rs b/crates/bevy_scene/src/command.rs deleted file mode 100644 index 55b8c4ec85db8..0000000000000 --- a/crates/bevy_scene/src/command.rs +++ /dev/null @@ -1,56 +0,0 @@ -use bevy_asset::Handle; -use bevy_ecs::{ - entity::Entity, - system::{Command, Commands}, - world::World, -}; -use bevy_hierarchy::ChildBuilder; - -use crate::{Scene, SceneSpawner}; - -pub struct SpawnScene { - scene_handle: Handle, -} - -impl Command for SpawnScene { - fn write(self, world: &mut World) { - let mut spawner = world.resource_mut::(); - spawner.spawn(self.scene_handle); - } -} - -pub trait SpawnSceneCommands { - fn spawn_scene(&mut self, scene: Handle); -} - -impl<'w, 's> SpawnSceneCommands for Commands<'w, 's> { - fn spawn_scene(&mut self, scene_handle: Handle) { - self.add(SpawnScene { scene_handle }); - } -} - -pub struct SpawnSceneAsChild { - scene_handle: Handle, - parent: Entity, -} - -impl Command for SpawnSceneAsChild { - fn write(self, world: &mut World) { - let mut spawner = world.resource_mut::(); - spawner.spawn_as_child(self.scene_handle, self.parent); - } -} - -pub trait SpawnSceneAsChildCommands { - fn spawn_scene(&mut self, scene: Handle) -> &mut Self; -} - -impl<'w, 's, 'a> SpawnSceneAsChildCommands for ChildBuilder<'w, 's, 'a> { - fn spawn_scene(&mut self, scene_handle: Handle) -> &mut Self { - self.add_command(SpawnSceneAsChild { - scene_handle, - parent: self.parent_entity(), - }); - self - } -} diff --git a/crates/bevy_scene/src/dynamic_scene.rs b/crates/bevy_scene/src/dynamic_scene.rs index 3f2402850f548..5d77205a6ec62 100644 --- a/crates/bevy_scene/src/dynamic_scene.rs +++ b/crates/bevy_scene/src/dynamic_scene.rs @@ -9,6 +9,12 @@ use bevy_reflect::{Reflect, TypeRegistryArc, TypeUuid}; use serde::Serialize; /// A collection of serializable dynamic entities, each with its own run-time defined set of components. 
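With `command.rs` removed, `commands.spawn_scene(...)` and the `ChildBuilder` variant are gone; a scene is now spawned by inserting one of the new bundles, and the `SceneInstance` component it gains can be used to despawn it later. A hedged sketch of the new flow (asset path and system names are placeholders):

```rust
use bevy::{prelude::*, scene::SceneInstance};

fn spawn_helmet(mut commands: Commands, asset_server: Res<AssetServer>) {
    // The loaded scene is spawned as children of this entity by the new
    // scene_spawner system, which also records the spawner's InstanceId
    // on the entity as a SceneInstance component.
    commands.spawn_bundle(SceneBundle {
        scene: asset_server.load("models/helmet.gltf#Scene0"),
        transform: Transform::from_xyz(0.0, 1.0, 0.0),
        ..default()
    });
}

// Illustrative only: tears down every spawned scene it can see.
fn despawn_all_scenes(
    instances: Query<&SceneInstance>,
    mut scene_spawner: ResMut<SceneSpawner>,
) {
    for instance in instances.iter() {
        // SceneInstance derefs to the InstanceId handed out by the spawner.
        scene_spawner.despawn_instance(**instance);
    }
}
```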
+/// To spawn a dynamic scene, you can use either: +/// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) +/// * adding the [`DynamicSceneBundle`](crate::DynamicSceneBundle) to an entity +/// * adding the [`Handle`](bevy_asset::Handle) to an entity (the scene will only be +/// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and +/// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Default, TypeUuid)] #[uuid = "749479b1-fb8c-4ff8-a775-623aa76014f5"] pub struct DynamicScene { @@ -145,8 +151,5 @@ where .decimal_floats(true) .indentor(" ".to_string()) .new_line("\n".to_string()); - let mut buf = Vec::new(); - let mut ron_serializer = ron::ser::Serializer::new(&mut buf, Some(pretty_config), false)?; - serialize.serialize(&mut ron_serializer)?; - Ok(String::from_utf8(buf).unwrap()) + ron::ser::to_string_pretty(&serialize, pretty_config) } diff --git a/crates/bevy_scene/src/lib.rs b/crates/bevy_scene/src/lib.rs index 7c35b9ba11fd3..bf51a8fc7b7e2 100644 --- a/crates/bevy_scene/src/lib.rs +++ b/crates/bevy_scene/src/lib.rs @@ -1,11 +1,11 @@ -mod command; +mod bundle; mod dynamic_scene; mod scene; mod scene_loader; mod scene_spawner; pub mod serde; -pub use command::*; +pub use bundle::*; pub use dynamic_scene::*; pub use scene::*; pub use scene_loader::*; @@ -13,9 +13,7 @@ pub use scene_spawner::*; pub mod prelude { #[doc(hidden)] - pub use crate::{ - DynamicScene, Scene, SceneSpawner, SpawnSceneAsChildCommands, SpawnSceneCommands, - }; + pub use crate::{DynamicScene, DynamicSceneBundle, Scene, SceneBundle, SceneSpawner}; } use bevy_app::prelude::*; @@ -34,6 +32,8 @@ impl Plugin for ScenePlugin { .add_system_to_stage( CoreStage::PreUpdate, scene_spawner_system.exclusive_system().at_end(), - ); + ) + // Systems `*_bundle_spawner` must run before `scene_spawner_system` + .add_system_to_stage(CoreStage::PreUpdate, scene_spawner); } } diff --git a/crates/bevy_scene/src/scene.rs b/crates/bevy_scene/src/scene.rs index 0a8bd50b82492..4cd871fa7283a 100644 --- a/crates/bevy_scene/src/scene.rs +++ b/crates/bevy_scene/src/scene.rs @@ -1,6 +1,12 @@ use bevy_ecs::world::World; use bevy_reflect::TypeUuid; +/// To spawn a scene, you can use either: +/// * [`SceneSpawner::spawn`](crate::SceneSpawner::spawn) +/// * adding the [`SceneBundle`](crate::SceneBundle) to an entity +/// * adding the [`Handle`](bevy_asset::Handle) to an entity (the scene will only be +/// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and +/// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Debug, TypeUuid)] #[uuid = "c156503c-edd9-4ec7-8d33-dab392df03cd"] pub struct Scene { diff --git a/crates/bevy_scene/src/scene_spawner.rs b/crates/bevy_scene/src/scene_spawner.rs index 142d299743efd..dd6cf489f195d 100644 --- a/crates/bevy_scene/src/scene_spawner.rs +++ b/crates/bevy_scene/src/scene_spawner.rs @@ -33,9 +33,10 @@ pub struct SceneSpawner { spawned_dynamic_scenes: HashMap, Vec>, spawned_instances: HashMap, scene_asset_event_reader: ManualEventReader>, - dynamic_scenes_to_spawn: Vec>, + dynamic_scenes_to_spawn: Vec<(Handle, InstanceId)>, scenes_to_spawn: Vec<(Handle, InstanceId)>, scenes_to_despawn: Vec>, + instances_to_despawn: Vec, scenes_with_parent: Vec<(InstanceId, Entity)>, } @@ -53,7 +54,21 @@ pub enum SceneSpawnError { impl SceneSpawner { pub fn spawn_dynamic(&mut self, scene_handle: Handle) { - self.dynamic_scenes_to_spawn.push(scene_handle); + let instance_id = 
InstanceId::new(); + self.dynamic_scenes_to_spawn + .push((scene_handle, instance_id)); + } + + pub fn spawn_dynamic_as_child( + &mut self, + scene_handle: Handle, + parent: Entity, + ) -> InstanceId { + let instance_id = InstanceId::new(); + self.dynamic_scenes_to_spawn + .push((scene_handle, instance_id)); + self.scenes_with_parent.push((instance_id, parent)); + instance_id } pub fn spawn(&mut self, scene_handle: Handle) -> InstanceId { @@ -73,26 +88,31 @@ impl SceneSpawner { self.scenes_to_despawn.push(scene_handle); } + pub fn despawn_instance(&mut self, instance_id: InstanceId) { + self.instances_to_despawn.push(instance_id); + } + pub fn despawn_sync( &mut self, world: &mut World, scene_handle: Handle, ) -> Result<(), SceneSpawnError> { - if let Some(instance_ids) = self.spawned_dynamic_scenes.get(&scene_handle) { + if let Some(instance_ids) = self.spawned_dynamic_scenes.remove(&scene_handle) { for instance_id in instance_ids { - if let Some(instance) = self.spawned_instances.get(instance_id) { - for entity in instance.entity_map.values() { - let _ = world.despawn(entity); // Ignore the result, despawn only cares if - // it exists. - } - } + self.despawn_instance_sync(world, &instance_id); } - - self.spawned_dynamic_scenes.remove(&scene_handle); } Ok(()) } + pub fn despawn_instance_sync(&mut self, world: &mut World, instance_id: &InstanceId) { + if let Some(instance) = self.spawned_instances.remove(instance_id) { + for entity in instance.entity_map.values() { + let _ = world.despawn(entity); + } + } + } + pub fn spawn_dynamic_sync( &mut self, world: &mut World, @@ -235,14 +255,33 @@ impl SceneSpawner { Ok(()) } + pub fn despawn_queued_instances(&mut self, world: &mut World) { + let instances_to_despawn = std::mem::take(&mut self.instances_to_despawn); + + for instance_id in instances_to_despawn { + self.despawn_instance_sync(world, &instance_id); + } + } + pub fn spawn_queued_scenes(&mut self, world: &mut World) -> Result<(), SceneSpawnError> { let scenes_to_spawn = std::mem::take(&mut self.dynamic_scenes_to_spawn); - for scene_handle in scenes_to_spawn { - match self.spawn_dynamic_sync(world, &scene_handle) { - Ok(_) => {} + for (scene_handle, instance_id) in scenes_to_spawn { + let mut entity_map = EntityMap::default(); + + match Self::spawn_dynamic_internal(world, &scene_handle, &mut entity_map) { + Ok(_) => { + self.spawned_instances + .insert(instance_id, InstanceInfo { entity_map }); + let spawned = self + .spawned_dynamic_scenes + .entry(scene_handle.clone()) + .or_insert_with(Vec::new); + spawned.push(instance_id); + } Err(SceneSpawnError::NonExistentScene { .. 
}) => { - self.dynamic_scenes_to_spawn.push(scene_handle); + self.dynamic_scenes_to_spawn + .push((scene_handle, instance_id)); } Err(err) => return Err(err), } @@ -327,6 +366,7 @@ pub fn scene_spawner_system(world: &mut World) { } scene_spawner.despawn_queued_scenes(world).unwrap(); + scene_spawner.despawn_queued_instances(world); scene_spawner .spawn_queued_scenes(world) .unwrap_or_else(|err| panic!("{}", err)); diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs index b24833d52c0ef..a7be1a4ab983b 100644 --- a/crates/bevy_sprite/src/lib.rs +++ b/crates/bevy_sprite/src/lib.rs @@ -30,7 +30,7 @@ pub use texture_atlas_builder::*; use bevy_app::prelude::*; use bevy_asset::{AddAsset, Assets, HandleUntyped}; -use bevy_core_pipeline::Transparent2d; +use bevy_core_pipeline::core_2d::Transparent2d; use bevy_ecs::schedule::{ParallelSystemDescriptorCoercion, SystemLabel}; use bevy_reflect::TypeUuid; use bevy_render::{ diff --git a/crates/bevy_sprite/src/mesh2d/color_material.rs b/crates/bevy_sprite/src/mesh2d/color_material.rs index 5ea84c802c8e9..0721b1a0a607d 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.rs +++ b/crates/bevy_sprite/src/mesh2d/color_material.rs @@ -7,10 +7,7 @@ use bevy_render::{ color::Color, prelude::Shader, render_asset::{PrepareAssetError, RenderAsset, RenderAssets}, - render_resource::{ - std140::{AsStd140, Std140}, - *, - }, + render_resource::*, renderer::RenderDevice, texture::Image, }; @@ -92,7 +89,7 @@ bitflags::bitflags! { } /// The GPU representation of the uniform data of a [`ColorMaterial`]. -#[derive(Clone, Default, AsStd140)] +#[derive(Clone, Default, ShaderType)] pub struct ColorMaterialUniformData { pub color: Vec4, pub flags: u32, @@ -145,12 +142,15 @@ impl RenderAsset for ColorMaterial { color: material.color.as_linear_rgba_f32().into(), flags: flags.bits(), }; - let value_std140 = value.as_std140(); + + let byte_buffer = [0u8; ColorMaterialUniformData::SIZE.get() as usize]; + let mut buffer = encase::UniformBuffer::new(byte_buffer); + buffer.write(&value).unwrap(); let buffer = render_device.create_buffer_with_data(&BufferInitDescriptor { label: Some("color_material_uniform_buffer"), usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST, - contents: value_std140.as_bytes(), + contents: buffer.as_ref(), }); let bind_group = render_device.create_bind_group(&BindGroupDescriptor { entries: &[ @@ -201,9 +201,7 @@ impl Material2d for ColorMaterial { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: BufferSize::new( - ColorMaterialUniformData::std140_size_static() as u64, - ), + min_binding_size: Some(ColorMaterialUniformData::min_size()), }, count: None, }, diff --git a/crates/bevy_sprite/src/mesh2d/color_material.wgsl b/crates/bevy_sprite/src/mesh2d/color_material.wgsl index a11e48094f3c6..24b55e448177d 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.wgsl +++ b/crates/bevy_sprite/src/mesh2d/color_material.wgsl @@ -1,5 +1,5 @@ -#import bevy_sprite::mesh2d_view_bind_group -#import bevy_sprite::mesh2d_struct +#import bevy_sprite::mesh2d_types +#import bevy_sprite::mesh2d_view_bindings struct ColorMaterial { color: vec4; @@ -8,9 +8,6 @@ struct ColorMaterial { }; let COLOR_MATERIAL_FLAGS_TEXTURE_BIT: u32 = 1u; -[[group(0), binding(0)]] -var view: View; - [[group(1), binding(0)]] var material: ColorMaterial; [[group(1), binding(1)]] @@ -29,13 +26,20 @@ struct FragmentInput { #ifdef VERTEX_TANGENTS [[location(3)]] world_tangent: vec4; #endif +#ifdef VERTEX_COLORS + 
[[location(4)]] colors: vec4; +#endif }; [[stage(fragment)]] fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { var output_color: vec4 = material.color; if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { +#ifdef VERTEX_COLORS + output_color = output_color * textureSample(texture, texture_sampler, in.uv) * in.colors; +#else output_color = output_color * textureSample(texture, texture_sampler, in.uv); +#endif } return output_color; -} \ No newline at end of file +} diff --git a/crates/bevy_sprite/src/mesh2d/material.rs b/crates/bevy_sprite/src/mesh2d/material.rs index 77cb3c4ebe92f..9ed1ce540757b 100644 --- a/crates/bevy_sprite/src/mesh2d/material.rs +++ b/crates/bevy_sprite/src/mesh2d/material.rs @@ -1,6 +1,6 @@ use bevy_app::{App, Plugin}; use bevy_asset::{AddAsset, Asset, AssetServer, Handle}; -use bevy_core_pipeline::Transparent2d; +use bevy_core_pipeline::core_2d::Transparent2d; use bevy_ecs::{ entity::Entity, prelude::{Bundle, World}, @@ -12,9 +12,9 @@ use bevy_ecs::{ }; use bevy_log::error; use bevy_render::{ + extract_component::ExtractComponentPlugin, mesh::{Mesh, MeshVertexBufferLayout}, render_asset::{RenderAsset, RenderAssetPlugin, RenderAssets}, - render_component::ExtractComponentPlugin, render_phase::{ AddRenderCommand, DrawFunctions, EntityRenderCommand, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index eb7f40b751108..7a6ebfeadff93 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -2,18 +2,20 @@ use bevy_app::Plugin; use bevy_asset::{load_internal_asset, Handle, HandleUntyped}; use bevy_ecs::{ prelude::*, - system::{lifetimeless::*, SystemParamItem}, + system::{lifetimeless::*, SystemParamItem, SystemState}, }; use bevy_math::{Mat4, Vec2}; use bevy_reflect::{Reflect, TypeUuid}; use bevy_render::{ + extract_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, mesh::{GpuBufferInfo, Mesh, MeshVertexBufferLayout}, render_asset::RenderAssets, - render_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, render_phase::{EntityRenderCommand, RenderCommandResult, TrackedRenderPass}, - render_resource::{std140::AsStd140, *}, + render_resource::*, renderer::{RenderDevice, RenderQueue}, - texture::{BevyDefault, GpuImage, Image, TextureFormatPixelInfo}, + texture::{ + BevyDefault, DefaultImageSampler, GpuImage, Image, ImageSampler, TextureFormatPixelInfo, + }, view::{ComputedVisibility, ExtractedView, ViewUniform, ViewUniformOffset, ViewUniforms}, RenderApp, RenderStage, }; @@ -35,28 +37,52 @@ impl From> for Mesh2dHandle { #[derive(Default)] pub struct Mesh2dRenderPlugin; -pub const MESH2D_VIEW_BIND_GROUP_HANDLE: HandleUntyped = +pub const MESH2D_VIEW_TYPES_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 12677582416765805110); +pub const MESH2D_VIEW_BINDINGS_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 6901431444735842434); -pub const MESH2D_STRUCT_HANDLE: HandleUntyped = +pub const MESH2D_TYPES_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 8994673400261890424); +pub const MESH2D_BINDINGS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 8983617858458862856); +pub const MESH2D_FUNCTIONS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 4976379308250389413); pub const MESH2D_SHADER_HANDLE: HandleUntyped = 
HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 2971387252468633715); impl Plugin for Mesh2dRenderPlugin { fn build(&self, app: &mut bevy_app::App) { - load_internal_asset!(app, MESH2D_SHADER_HANDLE, "mesh2d.wgsl", Shader::from_wgsl); load_internal_asset!( app, - MESH2D_STRUCT_HANDLE, - "mesh2d_struct.wgsl", + MESH2D_VIEW_TYPES_HANDLE, + "mesh2d_view_types.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + MESH2D_VIEW_BINDINGS_HANDLE, + "mesh2d_view_bindings.wgsl", Shader::from_wgsl ); load_internal_asset!( app, - MESH2D_VIEW_BIND_GROUP_HANDLE, - "mesh2d_view_bind_group.wgsl", + MESH2D_TYPES_HANDLE, + "mesh2d_types.wgsl", Shader::from_wgsl ); + load_internal_asset!( + app, + MESH2D_BINDINGS_HANDLE, + "mesh2d_bindings.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + MESH2D_FUNCTIONS_HANDLE, + "mesh2d_functions.wgsl", + Shader::from_wgsl + ); + load_internal_asset!(app, MESH2D_SHADER_HANDLE, "mesh2d.wgsl", Shader::from_wgsl); app.add_plugin(UniformComponentPlugin::::default()); @@ -71,7 +97,7 @@ impl Plugin for Mesh2dRenderPlugin { } } -#[derive(Component, AsStd140, Clone)] +#[derive(Component, ShaderType, Clone)] pub struct Mesh2dUniform { pub transform: Mat4, pub inverse_transpose_model: Mat4, @@ -124,7 +150,9 @@ pub struct Mesh2dPipeline { impl FromWorld for Mesh2dPipeline { fn from_world(world: &mut World) -> Self { - let render_device = world.resource::(); + let mut system_state: SystemState<(Res, Res)> = + SystemState::new(world); + let (render_device, default_sampler) = system_state.get_mut(world); let view_layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { entries: &[ // View @@ -134,7 +162,7 @@ impl FromWorld for Mesh2dPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(ViewUniform::std140_size_static() as u64), + min_binding_size: Some(ViewUniform::min_size()), }, count: None, }, @@ -149,7 +177,7 @@ impl FromWorld for Mesh2dPipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(Mesh2dUniform::std140_size_static() as u64), + min_binding_size: Some(Mesh2dUniform::min_size()), }, count: None, }], @@ -164,7 +192,10 @@ impl FromWorld for Mesh2dPipeline { TextureFormat::bevy_default(), ); let texture = render_device.create_texture(&image.texture_descriptor); - let sampler = render_device.create_sampler(&image.sampler_descriptor); + let sampler = match image.sampler_descriptor { + ImageSampler::Default => (**default_sampler).clone(), + ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + }; let format_size = image.texture_descriptor.format.pixel_size(); let render_queue = world.resource_mut::(); @@ -300,9 +331,6 @@ impl SpecializedMeshPipeline for Mesh2dPipeline { vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(4)); } - #[cfg(feature = "webgl")] - shader_defs.push(String::from("NO_ARRAY_TEXTURES_SUPPORT")); - let vertex_buffer_layout = layout.get_layout(&vertex_attributes)?; Ok(RenderPipelineDescriptor { diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl index 1e2b02eb64811..c1d29aef33419 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl @@ -1,5 +1,8 @@ -#import bevy_sprite::mesh2d_view_bind_group -#import bevy_sprite::mesh2d_struct +#import bevy_sprite::mesh2d_view_bindings +#import bevy_sprite::mesh2d_bindings + +// NOTE: Bindings 
must come before functions that use them! +#import bevy_sprite::mesh2d_functions struct Vertex { [[location(0)]] position: vec3; @@ -8,6 +11,9 @@ struct Vertex { #ifdef VERTEX_TANGENTS [[location(3)]] tangent: vec4; #endif +#ifdef VERTEX_COLORS + [[location(4)]] colors: vec4; +#endif }; struct VertexOutput { @@ -18,36 +24,23 @@ struct VertexOutput { #ifdef VERTEX_TANGENTS [[location(3)]] world_tangent: vec4; #endif +#ifdef VERTEX_COLORS + [[location(4)]] colors: vec4; +#endif }; -[[group(0), binding(0)]] -var view: View; - -[[group(2), binding(0)]] -var mesh: Mesh2d; - [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; out.uv = vertex.uv; - out.world_position = world_position; - out.clip_position = view.view_proj * world_position; - out.world_normal = mat3x3( - mesh.inverse_transpose_model[0].xyz, - mesh.inverse_transpose_model[1].xyz, - mesh.inverse_transpose_model[2].xyz - ) * vertex.normal; + out.world_position = mesh2d_position_local_to_world(mesh.model, vec4(vertex.position, 1.0)); + out.clip_position = mesh2d_position_world_to_clip(out.world_position); + out.world_normal = mesh2d_normal_local_to_world(vertex.normal); #ifdef VERTEX_TANGENTS - out.world_tangent = vec4( - mat3x3( - mesh.model[0].xyz, - mesh.model[1].xyz, - mesh.model[2].xyz - ) * vertex.tangent.xyz, - vertex.tangent.w - ); + out.world_tangent = mesh2d_tangent_local_to_world(vertex.tangent); +#endif +#ifdef VERTEX_COLORS + out.colors = vertex.colors; #endif return out; } @@ -65,4 +58,4 @@ struct FragmentInput { [[stage(fragment)]] fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { return vec4(1.0, 0.0, 1.0, 1.0); -} \ No newline at end of file +} diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl new file mode 100644 index 0000000000000..1bb2ec5959b8e --- /dev/null +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl @@ -0,0 +1,6 @@ +#define_import_path bevy_sprite::mesh2d_bindings + +#import bevy_sprite::mesh2d_types + +[[group(2), binding(0)]] +var mesh: Mesh2d; diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl new file mode 100644 index 0000000000000..5342b638494de --- /dev/null +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl @@ -0,0 +1,36 @@ +#define_import_path bevy_sprite::mesh2d_functions + +fn mesh2d_position_local_to_world(model: mat4x4, vertex_position: vec4) -> vec4 { + return model * vertex_position; +} + +fn mesh2d_position_world_to_clip(world_position: vec4) -> vec4 { + return view.view_proj * world_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. 
+fn mesh2d_position_local_to_clip(model: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh2d_position_local_to_world(model, vertex_position); + return mesh2d_position_world_to_clip(world_position); +} + +fn mesh2d_normal_local_to_world(vertex_normal: vec3) -> vec3 { + return mat3x3( + mesh.inverse_transpose_model[0].xyz, + mesh.inverse_transpose_model[1].xyz, + mesh.inverse_transpose_model[2].xyz + ) * vertex_normal; +} + +fn mesh2d_tangent_local_to_world(model: mat4x4, vertex_tangent: vec4) -> vec4 { + return vec4( + mat3x3( + model[0].xyz, + model[1].xyz, + model[2].xyz + ) * vertex_tangent.xyz, + vertex_tangent.w + ); +} diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_struct.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl similarity index 81% rename from crates/bevy_sprite/src/mesh2d/mesh2d_struct.wgsl rename to crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl index e60d07c33bbd2..83f83366eb25c 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_struct.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl @@ -1,4 +1,4 @@ -#define_import_path bevy_sprite::mesh2d_struct +#define_import_path bevy_sprite::mesh2d_types struct Mesh2d { model: mat4x4; diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl new file mode 100644 index 0000000000000..fedf12bff0072 --- /dev/null +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl @@ -0,0 +1,6 @@ +#define_import_path bevy_sprite::mesh2d_view_bindings + +#import bevy_sprite::mesh2d_view_types + +[[group(0), binding(0)]] +var view: View; diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bind_group.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_view_types.wgsl similarity index 77% rename from crates/bevy_sprite/src/mesh2d/mesh2d_view_bind_group.wgsl rename to crates/bevy_sprite/src/mesh2d/mesh2d_view_types.wgsl index b85d542fc0e0f..2e54a898e5621 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bind_group.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_view_types.wgsl @@ -1,4 +1,4 @@ -#define_import_path bevy_sprite::mesh2d_view_bind_group +#define_import_path bevy_sprite::mesh2d_view_types struct View { view_proj: mat4x4; diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index 25b155fd6531f..c0066482014f7 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -5,7 +5,7 @@ use crate::{ Rect, Sprite, SPRITE_SHADER_HANDLE, }; use bevy_asset::{AssetEvent, Assets, Handle, HandleId}; -use bevy_core_pipeline::Transparent2d; +use bevy_core_pipeline::core_2d::Transparent2d; use bevy_ecs::{ prelude::*, system::{lifetimeless::*, SystemParamItem}, @@ -19,7 +19,7 @@ use bevy_render::{ BatchedPhaseItem, DrawFunctions, EntityRenderCommand, RenderCommand, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, }, - render_resource::{std140::AsStd140, *}, + render_resource::*, renderer::{RenderDevice, RenderQueue}, texture::{BevyDefault, Image}, view::{Msaa, ViewUniform, ViewUniformOffset, ViewUniforms, Visibility}, @@ -48,7 +48,7 @@ impl FromWorld for SpritePipeline { ty: BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: BufferSize::new(ViewUniform::std140_size_static() as u64), + min_binding_size: Some(ViewUniform::min_size()), }, count: None, }], @@ -354,8 +354,9 @@ pub fn queue_sprites( for event in &events.images { match event { AssetEvent::Created { .. 
} => None, - AssetEvent::Modified { handle } => image_bind_groups.values.remove(handle), - AssetEvent::Removed { handle } => image_bind_groups.values.remove(handle), + AssetEvent::Modified { handle } | AssetEvent::Removed { handle } => { + image_bind_groups.values.remove(handle) + } }; } @@ -501,10 +502,10 @@ pub fn queue_sprites( // Store the vertex data and add the item to the render phase if current_batch.colored { - for i in QUAD_INDICES.iter() { + for i in QUAD_INDICES { sprite_meta.colored_vertices.push(ColoredSpriteVertex { - position: positions[*i], - uv: uvs[*i].into(), + position: positions[i], + uv: uvs[i].into(), color: extracted_sprite.color.as_linear_rgba_f32(), }); } @@ -520,10 +521,10 @@ pub fn queue_sprites( batch_range: Some(item_start..item_end), }); } else { - for i in QUAD_INDICES.iter() { + for i in QUAD_INDICES { sprite_meta.vertices.push(SpriteVertex { - position: positions[*i], - uv: uvs[*i].into(), + position: positions[i], + uv: uvs[i].into(), }); } let item_start = index; diff --git a/crates/bevy_sprite/src/render/sprite.wgsl b/crates/bevy_sprite/src/render/sprite.wgsl index 99015d2f9eff1..e04559dd00412 100644 --- a/crates/bevy_sprite/src/render/sprite.wgsl +++ b/crates/bevy_sprite/src/render/sprite.wgsl @@ -28,7 +28,7 @@ fn vertex( out.color = vertex_color; #endif return out; -} +} [[group(1), binding(0)]] var sprite_texture: texture_2d; @@ -37,9 +37,9 @@ var sprite_sampler: sampler; [[stage(fragment)]] fn fragment(in: VertexOutput) -> [[location(0)]] vec4 { - var color = textureSample(sprite_texture, sprite_sampler, in.uv); + var color = textureSample(sprite_texture, sprite_sampler, in.uv); #ifdef COLORED color = in.color * color; #endif return color; -} \ No newline at end of file +} diff --git a/crates/bevy_sprite/src/texture_atlas.rs b/crates/bevy_sprite/src/texture_atlas.rs index c290f45416a1f..bc3cea9400f87 100644 --- a/crates/bevy_sprite/src/texture_atlas.rs +++ b/crates/bevy_sprite/src/texture_atlas.rs @@ -68,25 +68,27 @@ impl TextureAtlas { } /// Generate a `TextureAtlas` by splitting a texture into a grid where each - /// cell of the grid of `tile_size` is one of the textures in the atlas + /// `tile_size` by `tile_size` grid-cell is one of the textures in the atlas pub fn from_grid( texture: Handle, tile_size: Vec2, columns: usize, rows: usize, ) -> TextureAtlas { - Self::from_grid_with_padding(texture, tile_size, columns, rows, Vec2::new(0f32, 0f32)) + Self::from_grid_with_padding(texture, tile_size, columns, rows, Vec2::ZERO, Vec2::ZERO) } /// Generate a `TextureAtlas` by splitting a texture into a grid where each - /// cell of the grid of `tile_size` is one of the textures in the atlas and is separated by - /// some `padding` in the texture + /// `tile_size` by `tile_size` grid-cell is one of the textures in the + /// atlas. Grid cells are separated by some `padding`, and the grid starts + /// at `offset` pixels from the top left corner. 
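Since `from_grid_with_padding` now also takes an `offset`, callers of the old five-argument form need one extra `Vec2`. A small sketch of the updated call (the sheet path and grid dimensions are made up):

```rust
use bevy::prelude::*;

fn setup_atlas(asset_server: Res<AssetServer>, mut atlases: ResMut<Assets<TextureAtlas>>) {
    let sheet = asset_server.load("textures/tiles.png"); // placeholder path
    // 16x16 tiles in an 8x4 grid, 1px of padding between cells, and the whole
    // grid starting 2px in from the top-left corner of the image (the new offset).
    let atlas = TextureAtlas::from_grid_with_padding(
        sheet,
        Vec2::splat(16.0),
        8,
        4,
        Vec2::splat(1.0),
        Vec2::new(2.0, 2.0),
    );
    let _handle = atlases.add(atlas);
}
```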
pub fn from_grid_with_padding( texture: Handle, tile_size: Vec2, columns: usize, rows: usize, padding: Vec2, + offset: Vec2, ) -> TextureAtlas { let mut sprites = Vec::new(); let mut x_padding = 0.0; @@ -102,8 +104,8 @@ impl TextureAtlas { } let rect_min = Vec2::new( - (tile_size.x + x_padding) * x as f32, - (tile_size.y + y_padding) * y as f32, + (tile_size.x + x_padding) * x as f32 + offset.x, + (tile_size.y + y_padding) * y as f32 + offset.y, ); sprites.push(Rect { diff --git a/crates/bevy_tasks/Cargo.toml b/crates/bevy_tasks/Cargo.toml index 7b83b9bc344bd..06a0da456931a 100644 --- a/crates/bevy_tasks/Cargo.toml +++ b/crates/bevy_tasks/Cargo.toml @@ -13,7 +13,8 @@ futures-lite = "1.4.0" event-listener = "2.5.2" async-executor = "1.3.0" async-channel = "1.4.2" -num_cpus = "1.0.1" +num_cpus = "1" +once_cell = "1.7" [target.'cfg(target_arch = "wasm32")'.dependencies] wasm-bindgen-futures = "0.4" diff --git a/crates/bevy_tasks/src/task_pool.rs b/crates/bevy_tasks/src/task_pool.rs index ebd6ba6b41f4c..1d0f86e7cb5ed 100644 --- a/crates/bevy_tasks/src/task_pool.rs +++ b/crates/bevy_tasks/src/task_pool.rs @@ -60,29 +60,9 @@ impl TaskPoolBuilder { } } -#[derive(Debug)] -struct TaskPoolInner { - threads: Vec>, - shutdown_tx: async_channel::Sender<()>, -} - -impl Drop for TaskPoolInner { - fn drop(&mut self) { - self.shutdown_tx.close(); - - let panicking = thread::panicking(); - for join_handle in self.threads.drain(..) { - let res = join_handle.join(); - if !panicking { - res.expect("Task thread panicked while executing."); - } - } - } -} - /// A thread pool for executing tasks. Tasks are futures that are being automatically driven by /// the pool on threads owned by the pool. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct TaskPool { /// The executor for the pool /// @@ -92,7 +72,8 @@ pub struct TaskPool { executor: Arc>, /// Inner state of the pool - inner: Arc, + threads: Vec>, + shutdown_tx: async_channel::Sender<()>, } impl TaskPool { @@ -155,16 +136,14 @@ impl TaskPool { Self { executor, - inner: Arc::new(TaskPoolInner { - threads, - shutdown_tx, - }), + threads, + shutdown_tx, } } /// Return the number of threads owned by the task pool pub fn thread_num(&self) -> usize { - self.inner.threads.len() + self.threads.len() } /// Allows spawning non-`'static` futures on the thread pool. The function takes a callback, @@ -268,6 +247,20 @@ impl Default for TaskPool { } } +impl Drop for TaskPool { + fn drop(&mut self) { + self.shutdown_tx.close(); + + let panicking = thread::panicking(); + for join_handle in self.threads.drain(..) { + let res = join_handle.join(); + if !panicking { + res.expect("Task thread panicked while executing."); + } + } + } +} + /// A `TaskPool` scope for running one or more non-`'static` futures. /// /// For more information, see [`TaskPool::scope`]. diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs index 923c1a7eb4eab..419d842f47168 100644 --- a/crates/bevy_tasks/src/usages.rs +++ b/crates/bevy_tasks/src/usages.rs @@ -11,12 +11,35 @@ //! for consumption. 
(likely via channels) use super::TaskPool; +use once_cell::sync::OnceCell; use std::ops::Deref; +static COMPUTE_TASK_POOL: OnceCell = OnceCell::new(); +static ASYNC_COMPUTE_TASK_POOL: OnceCell = OnceCell::new(); +static IO_TASK_POOL: OnceCell = OnceCell::new(); + /// A newtype for a task pool for CPU-intensive work that must be completed to deliver the next /// frame -#[derive(Clone, Debug)] -pub struct ComputeTaskPool(pub TaskPool); +#[derive(Debug)] +pub struct ComputeTaskPool(TaskPool); + +impl ComputeTaskPool { + /// Initializes the global [`ComputeTaskPool`] instance. + pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { + COMPUTE_TASK_POOL.get_or_init(|| Self(f())) + } + + /// Gets the global [`ComputeTaskPool`] instance. + /// + /// # Panics + /// Panics if no pool has been initialized yet. + pub fn get() -> &'static Self { + COMPUTE_TASK_POOL.get().expect( + "A ComputeTaskPool has not been initialized yet. Please call \ + ComputeTaskPool::init beforehand.", + ) + } +} impl Deref for ComputeTaskPool { type Target = TaskPool; @@ -27,8 +50,26 @@ impl Deref for ComputeTaskPool { } /// A newtype for a task pool for CPU-intensive work that may span across multiple frames -#[derive(Clone, Debug)] -pub struct AsyncComputeTaskPool(pub TaskPool); +#[derive(Debug)] +pub struct AsyncComputeTaskPool(TaskPool); + +impl AsyncComputeTaskPool { + /// Initializes the global [`AsyncComputeTaskPool`] instance. + pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { + ASYNC_COMPUTE_TASK_POOL.get_or_init(|| Self(f())) + } + + /// Gets the global [`AsyncComputeTaskPool`] instance. + /// + /// # Panics + /// Panics if no pool has been initialized yet. + pub fn get() -> &'static Self { + ASYNC_COMPUTE_TASK_POOL.get().expect( + "A AsyncComputeTaskPool has not been initialized yet. Please call \ + AsyncComputeTaskPool::init beforehand.", + ) + } +} impl Deref for AsyncComputeTaskPool { type Target = TaskPool; @@ -40,8 +81,26 @@ impl Deref for AsyncComputeTaskPool { /// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a /// "woken" state) -#[derive(Clone, Debug)] -pub struct IoTaskPool(pub TaskPool); +#[derive(Debug)] +pub struct IoTaskPool(TaskPool); + +impl IoTaskPool { + /// Initializes the global [`IoTaskPool`] instance. + pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { + IO_TASK_POOL.get_or_init(|| Self(f())) + } + + /// Gets the global [`IoTaskPool`] instance. + /// + /// # Panics + /// Panics if no pool has been initialized yet. + pub fn get() -> &'static Self { + IO_TASK_POOL.get().expect( + "A IoTaskPool has not been initialized yet. 
Please call \ + IoTaskPool::init beforehand.", + ) + } +} impl Deref for IoTaskPool { type Target = TaskPool; diff --git a/crates/bevy_text/src/font_atlas_set.rs b/crates/bevy_text/src/font_atlas_set.rs index 34a1a843c451a..4549f474bf515 100644 --- a/crates/bevy_text/src/font_atlas_set.rs +++ b/crates/bevy_text/src/font_atlas_set.rs @@ -76,10 +76,18 @@ impl FontAtlasSet { ) }; if !font_atlases.iter_mut().any(add_char_to_font_atlas) { + // Find the largest dimension of the glyph, either its width or its height + let glyph_max_size: u32 = glyph_texture + .texture_descriptor + .size + .height + .max(glyph_texture.texture_descriptor.size.width); + // Pick the higher of 512 or the smallest power of 2 greater than glyph_max_size + let containing = (1u32 << (32 - glyph_max_size.leading_zeros())).max(512) as f32; font_atlases.push(FontAtlas::new( textures, texture_atlases, - Vec2::new(512.0, 512.0), + Vec2::new(containing, containing), )); if !font_atlases.last_mut().unwrap().add_glyph( textures, diff --git a/crates/bevy_text/src/pipeline.rs b/crates/bevy_text/src/pipeline.rs index 5b728552513ab..60196fe2787ce 100644 --- a/crates/bevy_text/src/pipeline.rs +++ b/crates/bevy_text/src/pipeline.rs @@ -66,7 +66,7 @@ impl TextPipeline { .iter() .map(|section| { let font = fonts - .get(section.style.font.id) + .get(§ion.style.font) .ok_or(TextError::NoSuchFont)?; let font_id = self.get_or_insert_font_id(§ion.style.font, font); let font_size = scale_value(section.style.font_size, scale_factor); diff --git a/crates/bevy_text/src/text2d.rs b/crates/bevy_text/src/text2d.rs index a9eefb7528d36..d0bebbad3609a 100644 --- a/crates/bevy_text/src/text2d.rs +++ b/crates/bevy_text/src/text2d.rs @@ -48,7 +48,7 @@ impl Default for Text2dBounds { } } -/// The bundle of components needed to draw text in a 2D scene via a 2D `OrthographicCameraBundle`. +/// The bundle of components needed to draw text in a 2D scene via a 2D `Camera2dBundle`. /// [Example usage.](https://github.com/bevyengine/bevy/blob/latest/examples/2d/text2d.rs) #[derive(Bundle, Clone, Debug, Default)] pub struct Text2dBundle { @@ -98,7 +98,7 @@ pub fn extract_text2d_sprite( .color .as_rgba_linear(); let atlas = texture_atlases - .get(text_glyph.atlas_info.texture_atlas.clone_weak()) + .get(&text_glyph.atlas_info.texture_atlas) .unwrap(); let handle = atlas.texture.clone_weak(); let index = text_glyph.atlas_info.glyph_index as usize; @@ -127,7 +127,7 @@ pub fn extract_text2d_sprite( /// Updates the layout and size information whenever the text or style is changed. /// This information is computed by the `TextPipeline` on insertion, then stored. -#[allow(clippy::too_many_arguments, clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] pub fn update_text2d_layout( // Text items which should be reprocessed again, generally when the font hasn't loaded yet. 
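On the task pool changes above: the pools are now process-wide singletons guarded by `once_cell`, so code initializes them once and then calls `get()` anywhere instead of cloning a pool out of an ECS resource. A minimal sketch of the new API outside Bevy's own plugins (the spawned workload is a placeholder):

```rust
use bevy_tasks::{ComputeTaskPool, TaskPool};

fn main() {
    // Initialize the global pool once; later init() calls return the same instance.
    ComputeTaskPool::init(TaskPool::default);

    // Anywhere afterwards: fetch the pool instead of carrying a Res<ComputeTaskPool>.
    let results = ComputeTaskPool::get().scope(|scope| {
        for i in 0..4 {
            scope.spawn(async move { i * i }); // placeholder CPU-bound work
        }
    });
    assert_eq!(results.len(), 4);
}
```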
mut queue: Local>, diff --git a/crates/bevy_time/Cargo.toml b/crates/bevy_time/Cargo.toml new file mode 100644 index 0000000000000..b1b2c03ff8bf7 --- /dev/null +++ b/crates/bevy_time/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "bevy_time" +version = "0.8.0-dev" +edition = "2021" +description = "Provides time functionality for Bevy Engine" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + + +[dependencies] +# bevy +bevy_app = { path = "../bevy_app", version = "0.8.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.8.0-dev", features = ["bevy_reflect"] } +bevy_reflect = { path = "../bevy_reflect", version = "0.8.0-dev", features = ["bevy"] } +bevy_utils = { path = "../bevy_utils", version = "0.8.0-dev" } diff --git a/crates/bevy_core/src/time/fixed_timestep.rs b/crates/bevy_time/src/fixed_timestep.rs similarity index 100% rename from crates/bevy_core/src/time/fixed_timestep.rs rename to crates/bevy_time/src/fixed_timestep.rs diff --git a/crates/bevy_time/src/lib.rs b/crates/bevy_time/src/lib.rs new file mode 100644 index 0000000000000..dc1f48552031c --- /dev/null +++ b/crates/bevy_time/src/lib.rs @@ -0,0 +1,46 @@ +mod fixed_timestep; +mod stopwatch; +#[allow(clippy::module_inception)] +mod time; +mod timer; + +pub use fixed_timestep::*; +pub use stopwatch::*; +pub use time::*; +pub use timer::*; + +pub mod prelude { + //! The Bevy Time Prelude. + #[doc(hidden)] + pub use crate::{Time, Timer}; +} + +use bevy_app::prelude::*; +use bevy_ecs::prelude::*; + +/// Adds time functionality to Apps. +#[derive(Default)] +pub struct TimePlugin; + +#[derive(Debug, PartialEq, Eq, Clone, Hash, SystemLabel)] +/// Updates the elapsed time. Any system that interacts with [Time] component should run after +/// this. +pub struct TimeSystem; + +impl Plugin for TimePlugin { + fn build(&self, app: &mut App) { + app.init_resource::