diff --git a/examples_tests b/examples_tests index 501510d717..c693ddb054 160000 --- a/examples_tests +++ b/examples_tests @@ -1 +1 @@ -Subproject commit 501510d71705081575dfe28718c687772a339f89 +Subproject commit c693ddb0541143adc5c2f757836d2f714fed4d2c diff --git a/include/nbl/builtin/hlsl/math/quaternions.hlsl b/include/nbl/builtin/hlsl/math/quaternions.hlsl index 49a8f95d22..9cd344c109 100644 --- a/include/nbl/builtin/hlsl/math/quaternions.hlsl +++ b/include/nbl/builtin/hlsl/math/quaternions.hlsl @@ -380,18 +380,150 @@ struct static_cast_helper, math::quaternion > template struct static_cast_helper, math::quaternion > { - static inline matrix cast(const math::quaternion q) + static inline matrix cast(NBL_CONST_REF_ARG(math::quaternion) q) { return q.__constructMatrix(); } }; +template +inline bool is_finite_quaternion(NBL_CONST_REF_ARG(math::quaternion) q) +{ + return !hlsl::isnan(q.data.x) && + !hlsl::isnan(q.data.y) && + !hlsl::isnan(q.data.z) && + !hlsl::isnan(q.data.w); +} + +template +inline T score_matrix_to_quaternion_cast_candidate( + NBL_CONST_REF_ARG(matrix) target, + NBL_CONST_REF_ARG(math::quaternion) candidate) +{ + if (!is_finite_quaternion(candidate)) + return bit_cast(numeric_limits::infinity); + + const vector rebuiltRight = candidate.transformVector(vector(T(1), T(0), T(0)), true); + const vector rebuiltUp = candidate.transformVector(vector(T(0), T(1), T(0)), true); + const vector rebuiltForward = candidate.transformVector(vector(T(0), T(0), T(1)), true); + return + hlsl::length(rebuiltRight - target[0]) + + hlsl::length(rebuiltUp - target[1]) + + hlsl::length(rebuiltForward - target[2]); +} + +template +inline math::quaternion direct_matrix_to_quaternion_cast(NBL_CONST_REF_ARG(matrix) input) +{ + typedef math::quaternion quaternion_t; + typedef typename quaternion_t::data_type data_type; + + const T xLengthSq = hlsl::dot(input[0], input[0]); + const T yLengthSq = hlsl::dot(input[1], input[1]); + const T zLengthSq = hlsl::dot(input[2], 
input[2]); + const T uniformScaleSq = (xLengthSq + yLengthSq + zLengthSq) / T(3.0); + if (uniformScaleSq < numeric_limits::min) + { + quaternion_t retval; + retval.data = hlsl::promote(bit_cast(numeric_limits::quiet_NaN)); + return retval; + } + + const T uniformScale = hlsl::sqrt(uniformScaleSq); + matrix m = input; + m /= uniformScale; + + const T m00 = m[0][0]; + const T m11 = m[1][1]; + const T m22 = m[2][2]; + const T neg_m00 = -m00; + const T neg_m11 = -m11; + const T neg_m22 = -m22; + const data_type Qx = data_type(m00, m00, neg_m00, neg_m00); + const data_type Qy = data_type(m11, neg_m11, m11, neg_m11); + const data_type Qz = data_type(m22, neg_m22, neg_m22, m22); + const data_type tmp = Qx + Qy + Qz; + + quaternion_t retval; + if (tmp.x > T(0.0)) + { + const T scales = hlsl::sqrt(tmp.x + T(1.0)); + const T invscales = T(0.5) / scales; + retval.data.x = (m[2][1] - m[1][2]) * invscales; + retval.data.y = (m[0][2] - m[2][0]) * invscales; + retval.data.z = (m[1][0] - m[0][1]) * invscales; + retval.data.w = scales * T(0.5); + } + else if (tmp.y > T(0.0)) + { + const T scales = hlsl::sqrt(tmp.y + T(1.0)); + const T invscales = T(0.5) / scales; + retval.data.x = scales * T(0.5); + retval.data.y = (m[0][1] + m[1][0]) * invscales; + retval.data.z = (m[2][0] + m[0][2]) * invscales; + retval.data.w = (m[2][1] - m[1][2]) * invscales; + } + else if (tmp.z > T(0.0)) + { + const T scales = hlsl::sqrt(tmp.z + T(1.0)); + const T invscales = T(0.5) / scales; + retval.data.x = (m[0][1] + m[1][0]) * invscales; + retval.data.y = scales * T(0.5); + retval.data.z = (m[1][2] + m[2][1]) * invscales; + retval.data.w = (m[0][2] - m[2][0]) * invscales; + } + else + { + const T scales = hlsl::sqrt(tmp.w + T(1.0)); + const T invscales = T(0.5) / scales; + retval.data.x = (m[0][2] + m[2][0]) * invscales; + retval.data.y = (m[1][2] + m[2][1]) * invscales; + retval.data.z = scales * T(0.5); + retval.data.w = (m[1][0] - m[0][1]) * invscales; + } + + retval.data *= uniformScale; + return 
retval; +} + +template +inline math::quaternion matrix_to_quaternion_cast(NBL_CONST_REF_ARG(matrix) m) +{ + const math::quaternion directCandidate = math::quaternion::create(m, true); + const math::quaternion transposedCandidate = math::quaternion::create(hlsl::transpose(m), true); + const math::quaternion directFallback = direct_matrix_to_quaternion_cast(m); + const math::quaternion transposedFallback = direct_matrix_to_quaternion_cast(hlsl::transpose(m)); + + const T directScore = score_matrix_to_quaternion_cast_candidate(m, directCandidate); + const T transposedScore = score_matrix_to_quaternion_cast_candidate(m, transposedCandidate); + const T directFallbackScore = score_matrix_to_quaternion_cast_candidate(m, directFallback); + const T transposedFallbackScore = score_matrix_to_quaternion_cast_candidate(m, transposedFallback); + + math::quaternion bestCandidate = directCandidate; + T bestScore = directScore; + + if (transposedScore < bestScore) + { + bestCandidate = transposedCandidate; + bestScore = transposedScore; + } + if (directFallbackScore < bestScore) + { + bestCandidate = directFallback; + bestScore = directFallbackScore; + } + if (transposedFallbackScore < bestScore) + bestCandidate = transposedFallback; + + return bestCandidate; +} + template struct static_cast_helper, matrix > { - static inline math::quaternion cast(const matrix m) + static inline math::quaternion cast(NBL_CONST_REF_ARG(matrix) m) { - return math::quaternion::create(m, true); + return matrix_to_quaternion_cast(m); } }; } diff --git a/include/nbl/ext/Cameras/CArcballCamera.hpp b/include/nbl/ext/Cameras/CArcballCamera.hpp new file mode 100644 index 0000000000..d88d3743f3 --- /dev/null +++ b/include/nbl/ext/Cameras/CArcballCamera.hpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2024 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_ARCBALL_CAMERA_HPP_ +#define _C_ARCBALL_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera with planar target translation and bounded arcball orbiting. +/// +/// The runtime state is inherited from `CSphericalTargetCamera`. Translation +/// moves the target in the current view plane. Rotation updates orbit yaw and +/// pitch under a symmetric pitch limit. +class CArcballCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CArcballCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv.y = std::clamp(m_orbitUv.y, MinPitch, MaxPitch); + applyPose(); + } + ~CArcballCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of semantic translation and rotation input to the arcball rig. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + resolvedState.orbitUv.y = std::clamp(resolvedState.orbitUv.y, MinPitch, MaxPitch); + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const auto deltaRotation = scaleVirtualRotation(impulse.dVirtualRotation); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv.x += deltaRotation.y; + m_orbitUv.y = std::clamp(m_orbitUv.y + deltaRotation.x, MinPitch, MaxPitch); + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + const auto basis = computeBasis(m_orbitUv, m_distance); + applyPlanarTargetTranslation(deltaTranslation, basis); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Arcball; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Arcball Camera"; } + + static inline constexpr float MinDistance = base_t::MinDistance; + static inline constexpr float MaxDistance = base_t::MaxDistance; + +private: + + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr double MaxPitch = SCameraTargetRelativeRigDefaults::ArcballPitchLimitRad; + static inline constexpr double MinPitch = -MaxPitch; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CCameraFileUtilities.hpp b/include/nbl/ext/Cameras/CCameraFileUtilities.hpp new file mode 100644 index 0000000000..cd381bdacd --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraFileUtilities.hpp @@ -0,0 +1,94 @@ +#ifndef _C_CAMERA_FILE_UTILITIES_HPP_ +#define _C_CAMERA_FILE_UTILITIES_HPP_ + +#include +#include +#include + +#include "nbl/system/IFile.h" +#include "nbl/system/ISystem.h" + +namespace nbl::system +{ + +/// @brief Shared file I/O helpers used by camera persistence and scripted-runtime loaders. +/// +/// The helpers keep camera-facing persistence code independent from ad-hoc file +/// handling and provide one place for consistent error propagation. +struct CCameraFileUtilities final +{ +public: + /// @brief Read a whole file into a byte buffer. 
+ static inline bool readBinaryFile( + ISystem& system, + const path& filePath, + std::vector& outPayload, + std::string* error = nullptr, + const std::string_view openError = {}) + { + ISystem::future_t> future; + system.createFile(future, filePath, IFile::ECF_READ | IFile::ECF_MAPPABLE); + auto file = future.acquire(); + if (!file || !file->get()) + { + if (error && !openError.empty()) + *error = std::string(openError); + return false; + } + + auto& input = *file->get(); + const auto fileSize = input.getSize(); + outPayload.resize(fileSize); + if (outPayload.empty()) + return true; + + IFile::success_t readResult; + input.read(readResult, outPayload.data(), 0, fileSize); + if (!static_cast(readResult)) + { + if (error && !openError.empty()) + *error = std::string(openError); + return false; + } + return true; + } + + /// @brief Read a whole file and interpret its payload as UTF-8 text. + static inline bool readTextFile( + ISystem& system, + const path& filePath, + std::string& outText, + std::string* error = nullptr, + const std::string_view openError = {}) + { + std::vector payload; + if (!readBinaryFile(system, filePath, payload, error, openError)) + return false; + + outText.assign(reinterpret_cast(payload.data()), payload.size()); + return true; + } + + /// @brief Overwrite a file with the provided text payload. 
+ static inline bool writeTextFile( + ISystem& system, + const path& filePath, + const std::string_view text) + { + ISystem::future_t> future; + system.createFile(future, filePath, IFile::ECF_WRITE); + auto file = future.acquire(); + if (!file || !file->get()) + return false; + if (text.empty()) + return true; + + IFile::success_t writeResult; + (*file)->write(writeResult, text.data(), 0, text.size()); + return static_cast(writeResult); + } +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_FILE_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraFollowRegressionUtilities.hpp b/include/nbl/ext/Cameras/CCameraFollowRegressionUtilities.hpp new file mode 100644 index 0000000000..807698248d --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraFollowRegressionUtilities.hpp @@ -0,0 +1,146 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_FOLLOW_REGRESSION_UTILITIES_HPP_ +#define _C_CAMERA_FOLLOW_REGRESSION_UTILITIES_HPP_ + +#include + +#include "CCameraFollowUtilities.hpp" + +namespace nbl::system +{ + +struct SCameraProjectedTargetMetrics final +{ + hlsl::float32_t2 ndc = hlsl::float32_t2(0.0f); + float radius = 0.0f; +}; + +/// @brief Reusable follow validation helpers. 
+/// +/// The checks stay camera-domain: +/// +/// - camera-to-target direction must match the camera forward axis for locking modes +/// - target distance must be finite and internally consistent +/// - spherical cameras must write the tracked target back into spherical target state +/// - spherical distance must match the goal-derived distance when present +struct SCameraFollowRegressionResult +{ + bool passed = false; + bool hasLockMetrics = false; + float lockAngleDeg = 0.0f; + double targetDistance = 0.0; + bool hasProjectedMetrics = false; + SCameraProjectedTargetMetrics projectedTarget = {}; + bool hasSphericalState = false; + hlsl::float64_t3 sphericalTarget = hlsl::float64_t3(0.0); + float sphericalDistance = 0.0f; +}; + +/// @brief Reusable visual/debug metrics for one active follow configuration. +struct SCameraFollowVisualMetrics +{ + bool active = false; + core::ECameraFollowMode mode = core::ECameraFollowMode::Disabled; + bool lockValid = false; + float lockAngleDeg = 0.0f; + float targetDistance = 0.0f; + bool projectedValid = false; + SCameraProjectedTargetMetrics projectedTarget = {}; +}; + +/// @brief Shared view/projection bundle for CPU-side projected target metrics. +struct SCameraProjectionContext +{ + hlsl::float32_t4x4 viewMatrix = hlsl::float32_t4x4(1.0f); + hlsl::float32_t4x4 projectionMatrix = hlsl::float32_t4x4(1.0f); +}; + +/// @brief Shared tolerances for follow target lock, writeback, and projected-center checks. 
+struct SCameraFollowRegressionThresholds +{ + static inline constexpr float DefaultClipWEpsilon = 1e-5f; + static inline constexpr float DefaultProjectedNdcTolerance = 0.03f; + static inline constexpr float DefaultLockAngleToleranceDeg = static_cast(core::SCameraToolingThresholds::DefaultAngularToleranceDeg); + static inline constexpr double DefaultDistanceTolerance = core::SCameraToolingThresholds::ScalarTolerance; + static inline constexpr double DefaultTargetTolerance = core::SCameraToolingThresholds::TinyScalarEpsilon; + static inline constexpr double DefaultPositionTolerance = core::SCameraToolingThresholds::DefaultPositionTolerance; + static inline constexpr double DefaultRotationToleranceDeg = core::SCameraToolingThresholds::DefaultAngularToleranceDeg; + static inline constexpr double DefaultScalarTolerance = core::SCameraToolingThresholds::ScalarTolerance; + + float clipWEpsilon = DefaultClipWEpsilon; + float projectedNdcTolerance = DefaultProjectedNdcTolerance; + float lockAngleToleranceDeg = DefaultLockAngleToleranceDeg; + double distanceTolerance = DefaultDistanceTolerance; + double targetTolerance = DefaultTargetTolerance; + double positionTolerance = DefaultPositionTolerance; + double rotationToleranceDeg = DefaultRotationToleranceDeg; + double scalarTolerance = DefaultScalarTolerance; +}; + +/// @brief Bundled reusable follow regression flow. +/// The helper builds a follow goal, applies it, verifies the resulting camera state, +/// and then checks lock/writeback follow consistency. 
+struct SCameraFollowApplyValidationResult +{ + bool hasGoal = false; + core::CCameraGoal goal = {}; + core::CCameraGoalSolver::SApplyResult applyResult = {}; + bool hasCapturedGoal = false; + core::CCameraGoal capturedGoal = {}; + SCameraFollowRegressionResult regression = {}; +}; + +struct CCameraFollowRegressionUtilities final +{ +public: + static SCameraFollowRegressionThresholds makeFollowRegressionThresholds( + float projectedNdcTolerance = SCameraFollowRegressionThresholds::DefaultProjectedNdcTolerance, + float lockAngleToleranceDeg = SCameraFollowRegressionThresholds::DefaultLockAngleToleranceDeg); + + static bool tryComputeProjectedFollowTargetMetrics( + const SCameraProjectionContext& projectionContext, + const core::CTrackedTarget& trackedTarget, + SCameraProjectedTargetMetrics& outMetrics, + float clipWEpsilon = SCameraFollowRegressionThresholds::DefaultClipWEpsilon); + + static bool validateProjectedFollowTargetContract( + const SCameraProjectionContext& projectionContext, + const core::CTrackedTarget& trackedTarget, + SCameraProjectedTargetMetrics& outMetrics, + std::string* error = nullptr, + const SCameraFollowRegressionThresholds& thresholds = {}); + + static SCameraFollowVisualMetrics buildFollowVisualMetrics( + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, + const core::SCameraFollowConfig* followConfig, + const SCameraProjectionContext* projectionContext = nullptr); + + static bool validateFollowTargetContract( + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, + const core::SCameraFollowConfig& followConfig, + const core::CCameraGoal& followGoal, + SCameraFollowRegressionResult& out, + std::string* error = nullptr, + const SCameraProjectionContext* projectionContext = nullptr, + const SCameraFollowRegressionThresholds& thresholds = {}); + + static bool buildApplyAndValidateFollowTargetContract( + const core::CCameraGoalSolver& solver, + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, 
+ const core::SCameraFollowConfig& followConfig, + SCameraFollowApplyValidationResult& out, + std::string* error = nullptr, + const SCameraProjectionContext* projectionContext = nullptr, + const SCameraFollowRegressionThresholds& thresholds = {}); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_FOLLOW_REGRESSION_UTILITIES_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraFollowUtilities.hpp b/include/nbl/ext/Cameras/CCameraFollowUtilities.hpp new file mode 100644 index 0000000000..c960d80d57 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraFollowUtilities.hpp @@ -0,0 +1,246 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_FOLLOW_UTILITIES_HPP_ +#define _C_CAMERA_FOLLOW_UTILITIES_HPP_ + +#include +#include + +#include "CCameraGoalSolver.hpp" +#include "CCameraTargetRelativeUtilities.hpp" +#include "CCameraKindUtilities.hpp" + +namespace nbl::core +{ + +/// @brief Reusable tracked-target and follow helpers. +/// +/// The tracked subject owns its own gimbal. Follow code reads that pose and +/// maps one camera plus one tracked target into a `CCameraGoal`. +class CTrackedTarget +{ +public: + using gimbal_t = ICamera::CGimbal; + + /// @brief Construct a tracked target from an initial pose and optional identifier. + CTrackedTarget( + const hlsl::float64_t3& position = hlsl::float64_t3(0.0), + const hlsl::camera_quaternion_t& orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion(), + std::string identifier = "Follow Target"); + + /// @brief Return the stable human-readable identifier of the tracked target. + inline const std::string& getIdentifier() const { return m_identifier; } + /// @brief Return read-only access to the tracked target gimbal. + inline const gimbal_t& getGimbal() const { return m_gimbal; } + /// @brief Return mutable access to the tracked target gimbal. 
+ inline gimbal_t& getGimbal() { return m_gimbal; } + + /// @brief Replace the tracked target pose in world space. + void setPose(const hlsl::float64_t3& position, const hlsl::camera_quaternion_t& orientation); + + /// @brief Replace only the tracked target position. + void setPosition(const hlsl::float64_t3& position); + + /// @brief Replace only the tracked target orientation. + void setOrientation(const hlsl::camera_quaternion_t& orientation); + + /// @brief Replace the tracked target pose from a rigid transform matrix when possible. + bool trySetFromTransform(const hlsl::float64_t4x4& transform); + +private: + std::string m_identifier; + gimbal_t m_gimbal; +}; + +/// @brief Follow policy layered on top of a tracked target gimbal. +/// +/// Each mode defines how tracked-target motion updates the camera: +/// +/// - `OrbitTarget` rewrites target-relative camera state so the tracked target becomes the camera target +/// - `LookAtTarget` preserves camera position and rebuilds orientation toward the tracked target +/// - `KeepWorldOffset` places the camera at `trackedTarget.position + worldOffset` and looks at the target +/// - `KeepLocalOffset` transforms `localOffset` by the tracked-target local frame and looks at the target +/// +/// The tracked target provides pose data. The camera reads that data and does +/// not own the tracked subject. +enum class ECameraFollowMode : uint8_t +{ + Disabled, + OrbitTarget, + LookAtTarget, + KeepWorldOffset, + KeepLocalOffset +}; + +/// @brief Reusable follow configuration interpreted against a tracked target gimbal. +/// `worldOffset` and `localOffset` are only meaningful for their matching offset-based modes. +struct SCameraFollowConfig +{ + /// @brief Whether follow should be applied at all. + bool enabled = false; + /// @brief Follow policy used when the configuration is enabled. + ECameraFollowMode mode = ECameraFollowMode::OrbitTarget; + /// @brief World-space offset preserved by `KeepWorldOffset`. 
+ hlsl::float64_t3 worldOffset = hlsl::float64_t3(0.0); + /// @brief Target-local offset preserved by `KeepLocalOffset`. + hlsl::float64_t3 localOffset = hlsl::float64_t3(0.0); +}; + +/// @brief Shared policy helpers for tracked-target follow. +/// +/// The helpers decide which follow modes lock the view, which ones move the +/// camera, how offsets are captured, and how a tracked target is translated into +/// a `CCameraGoal` that can then be applied through the shared goal solver. +struct CCameraFollowUtilities final +{ + /// @brief Return whether the follow mode rebuilds camera orientation toward the tracked target. + static inline constexpr bool cameraFollowModeLocksViewToTarget(const ECameraFollowMode mode) + { + switch (mode) + { + case ECameraFollowMode::OrbitTarget: + case ECameraFollowMode::LookAtTarget: + case ECameraFollowMode::KeepWorldOffset: + case ECameraFollowMode::KeepLocalOffset: + return true; + default: + return false; + } + } + + /// @brief Return whether the follow mode moves the camera world position together with the target. + static inline constexpr bool cameraFollowModeMovesCameraPosition(const ECameraFollowMode mode) + { + switch (mode) + { + case ECameraFollowMode::OrbitTarget: + case ECameraFollowMode::KeepWorldOffset: + case ECameraFollowMode::KeepLocalOffset: + return true; + default: + return false; + } + } + + /// @brief Return whether the follow mode preserves the current camera world position. + static inline constexpr bool cameraFollowModeKeepsCameraWorldPosition(const ECameraFollowMode mode) + { + return mode == ECameraFollowMode::LookAtTarget; + } + + /// @brief Return whether the follow mode interprets `worldOffset`. + static inline constexpr bool cameraFollowModeUsesWorldOffset(const ECameraFollowMode mode) + { + return mode == ECameraFollowMode::KeepWorldOffset; + } + + /// @brief Return whether the follow mode interprets `localOffset`. 
+ static inline constexpr bool cameraFollowModeUsesLocalOffset(const ECameraFollowMode mode) + { + return mode == ECameraFollowMode::KeepLocalOffset; + } + + /// @brief Return whether the follow mode needs the tracked target local frame. + static inline constexpr bool cameraFollowModeUsesTrackedTargetLocalFrame(const ECameraFollowMode mode) + { + return mode == ECameraFollowMode::KeepLocalOffset; + } + + /// @brief Return whether the follow mode requires a captured offset before it can be replayed. + static inline constexpr bool cameraFollowModeUsesCapturedOffset(const ECameraFollowMode mode) + { + return cameraFollowModeUsesWorldOffset(mode) || cameraFollowModeUsesLocalOffset(mode); + } + + /// @brief Return the shared default follow mode for one camera kind. + static inline constexpr ECameraFollowMode getDefaultCameraFollowMode(const ICamera::CameraKind kind) + { + switch (kind) + { + case ICamera::CameraKind::Orbit: + case ICamera::CameraKind::Arcball: + case ICamera::CameraKind::Turntable: + case ICamera::CameraKind::TopDown: + case ICamera::CameraKind::Isometric: + case ICamera::CameraKind::DollyZoom: + case ICamera::CameraKind::Path: + return ECameraFollowMode::OrbitTarget; + case ICamera::CameraKind::Chase: + case ICamera::CameraKind::Dolly: + return ECameraFollowMode::KeepLocalOffset; + default: + return ECameraFollowMode::Disabled; + } + } + + /// @brief Build the shared default follow configuration for one camera kind. + static inline constexpr SCameraFollowConfig makeDefaultFollowConfig(const ICamera::CameraKind kind) + { + const auto mode = getDefaultCameraFollowMode(kind); + return { + .enabled = mode != ECameraFollowMode::Disabled, + .mode = mode + }; + } + + /// @brief Build the shared default follow configuration for one concrete camera instance. + static inline constexpr SCameraFollowConfig makeDefaultFollowConfig(const ICamera* const camera) + { + return camera ? 
makeDefaultFollowConfig(camera->getKind()) : SCameraFollowConfig{}; + } + + /// @brief Transform a tracked-target local offset into world space. + static hlsl::float64_t3 transformFollowLocalOffset(const ICamera::CGimbal& gimbal, const hlsl::float64_t3& localOffset); + + /// @brief Project a world-space offset into the tracked target local frame. + static hlsl::float64_t3 projectFollowWorldOffsetToLocal(const ICamera::CGimbal& gimbal, const hlsl::float64_t3& worldOffset); + + /// @brief Build a look-at orientation that points from `position` toward the tracked target. + static bool buildFollowLookAtOrientation( + const hlsl::float64_t3& position, + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& preferredUp, + hlsl::camera_quaternion_t& outOrientation); + + /// @brief Capture world-space and target-local follow offsets from the current camera pose. + static bool captureFollowOffsetsFromCamera( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + SCameraFollowConfig& ioConfig); + + /// @brief Measure the angular lock error between a camera forward axis and a tracked target. 
+ static bool tryComputeFollowTargetLockMetrics( + const ICamera::CGimbal& cameraGimbal, + const CTrackedTarget& trackedTarget, + float& outAngleDeg, + double* outDistance = nullptr); + + static bool tryBuildFollowPositionGoal( + ICamera* camera, + CCameraGoal& outGoal, + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const hlsl::float64_t3& preferredUp); + + static bool tryBuildFollowGoal( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + const SCameraFollowConfig& config, + CCameraGoal& outGoal); + + static CCameraGoalSolver::SApplyResult applyFollowToCamera( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + const SCameraFollowConfig& config, + CCameraGoal* outGoal = nullptr); +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_FOLLOW_UTILITIES_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraGoal.hpp b/include/nbl/ext/Cameras/CCameraGoal.hpp new file mode 100644 index 0000000000..d4121e2ba8 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraGoal.hpp @@ -0,0 +1,409 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_GOAL_HPP_ +#define _C_CAMERA_GOAL_HPP_ + +#include +#include +#include +#include +#include + +#include "CCameraPathUtilities.hpp" +#include "CCameraTargetRelativeUtilities.hpp" +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Typed transport object for camera state used by capture, comparison, presets, and playback. +struct CCameraGoal : SCameraRigPose +{ + /// @brief Camera kind that originally produced this goal. + ICamera::CameraKind sourceKind = ICamera::CameraKind::Unknown; + /// @brief Capability mask captured from the source camera. 
+ ICamera::capability_flags_t sourceCapabilities = ICamera::None; + /// @brief Goal-state fragments that were valid on the source camera. + ICamera::goal_state_flags_t sourceGoalStateMask = ICamera::GoalStateNone; + /// @brief Whether `targetPosition` is present in this goal. + bool hasTargetPosition = false; + /// @brief Tracked target position in world space. + hlsl::float64_t3 targetPosition = hlsl::float64_t3(0.0); + /// @brief Whether `distance` is present in this goal. + bool hasDistance = false; + /// @brief Explicit target-relative distance when present. + float distance = 0.f; + /// @brief Whether the canonical orbit state is present in this goal. + bool hasOrbitState = false; + /// @brief Canonical orbit yaw and pitch, expressed in radians. + hlsl::float64_t2 orbitUv = hlsl::float64_t2(0.0); + /// @brief Distance associated with `orbitUv` when the orbit state is present. + float orbitDistance = 0.f; + /// @brief Whether a typed path state is present in this goal. + bool hasPathState = false; + /// @brief Typed path state captured from or authored for a `Path Rig` camera. + ICamera::PathState pathState = {}; + /// @brief Whether a dynamic perspective state is present in this goal. + bool hasDynamicPerspectiveState = false; + /// @brief Typed dynamic perspective state captured from or authored for the source camera. + ICamera::DynamicPerspectiveState dynamicPerspectiveState = {}; +}; + +/// @brief Shared canonicalization, comparison, and conversion helpers for `CCameraGoal`. +struct CCameraGoalUtilities final +{ +public: + /// @brief Compute which typed goal-state fragments are required by the current goal payload. 
+ static inline ICamera::goal_state_flags_t getRequiredGoalStateMask(const CCameraGoal& target) + { + ICamera::goal_state_flags_t mask = ICamera::GoalStateNone; + if (target.hasTargetPosition || target.hasDistance || target.hasOrbitState) + mask |= ICamera::GoalStateSphericalTarget; + if (target.hasDynamicPerspectiveState) + mask |= ICamera::GoalStateDynamicPerspective; + if (target.hasPathState) + mask |= ICamera::GoalStatePath; + return mask; + } + + /// @brief Overwrite the canonical target-relative fields of a goal from prebuilt state and pose data. + static inline void applyCanonicalTargetRelativeGoalFields( + CCameraGoal& goal, + const SCameraTargetRelativeState& state, + const SCameraTargetRelativePose& pose) + { + goal.position = pose.position; + goal.orientation = pose.orientation; + goal.hasTargetPosition = true; + goal.targetPosition = state.target; + goal.hasDistance = true; + goal.distance = static_cast(pose.appliedDistance); + goal.hasOrbitState = true; + goal.orbitUv = state.orbitUv; + goal.orbitDistance = static_cast(pose.appliedDistance); + } + + /// @brief Rebuild the canonical target-relative portion of a goal from typed target-relative state. + static inline bool applyCanonicalTargetRelativeGoal(CCameraGoal& goal, const SCameraTargetRelativeState& state) + { + SCameraTargetRelativePose pose = {}; + if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativePoseFromState(state, SCameraTargetRelativeTraits::MinDistance, SCameraTargetRelativeTraits::DefaultMaxDistance, pose)) + return false; + + applyCanonicalTargetRelativeGoalFields(goal, state, pose); + return true; + } + + /// @brief Rebuild the canonical pose and orbit fields of a goal from typed path state. 
+ static inline bool applyCanonicalPathGoalFields( + CCameraGoal& goal, + const hlsl::float64_t3& targetPosition, + const ICamera::PathState& pathState, + const SCameraPathLimits& limits = CCameraPathUtilities::makeDefaultPathLimits()) + { + SCameraCanonicalPathState canonicalPathState = {}; + if (!CCameraPathUtilities::tryBuildCanonicalPathState(targetPosition, pathState, limits, canonicalPathState)) + return false; + + goal.hasTargetPosition = true; + goal.targetPosition = targetPosition; + goal.hasPathState = true; + goal.pathState = pathState; + SCameraTargetRelativePose canonicalPose = {}; + canonicalPose.position = canonicalPathState.pose.position; + canonicalPose.orientation = canonicalPathState.pose.orientation; + canonicalPose.appliedDistance = canonicalPathState.pose.appliedDistance; + applyCanonicalTargetRelativeGoalFields( + goal, + canonicalPathState.targetRelative, + canonicalPose); + return true; + } + + /// @brief Rebuild the canonical pose fields from the goal's current spherical-target payload. + static inline bool applyCanonicalSphericalGoal(CCameraGoal& goal) + { + if (!(goal.hasTargetPosition && goal.hasOrbitState)) + return false; + if (!hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitUv.x) || !hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitUv.y) || !hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitDistance)) + return false; + + return applyCanonicalTargetRelativeGoal( + goal, + { + .target = goal.targetPosition, + .orbitUv = goal.orbitUv, + .distance = goal.orbitDistance + }); + } + + /// @brief Infer a target-relative goal from a target position and a desired camera position. 
+	static inline bool buildCanonicalTargetRelativeGoalFromPosition(
+		CCameraGoal& goal,
+		const hlsl::float64_t3& targetPosition,
+		const hlsl::float64_t3& position)
+	{
+		SCameraTargetRelativeState state = {};
+		if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativeStateFromPosition(
+			targetPosition,
+			position,
+			SCameraTargetRelativeTraits::MinDistance,
+			SCameraTargetRelativeTraits::DefaultMaxDistance,
+			state))
+		{
+			return false;
+		}
+
+		return applyCanonicalTargetRelativeGoal(goal, state);
+	}
+
+	/// @brief Resolve the effective target-relative state of a goal against the current camera state.
+	/// Precedence: explicit orbit state on the goal wins; otherwise the orbit is derived from the
+	/// goal's absolute position; an explicit distance only overrides when no orbit state is present.
+	static inline bool tryResolveCanonicalTargetRelativeState(
+		const CCameraGoal& goal,
+		const ICamera::SphericalTargetState& currentState,
+		SCameraTargetRelativeState& outState)
+	{
+		// Start from the camera's current state so unspecified fragments fall through unchanged.
+		outState.target = goal.hasTargetPosition ? goal.targetPosition : currentState.target;
+		outState.orbitUv = currentState.orbitUv;
+		outState.distance = currentState.distance;
+
+		if (goal.hasOrbitState)
+		{
+			outState.orbitUv = goal.orbitUv;
+			outState.distance = goal.orbitDistance;
+		}
+		else
+		{
+			// No explicit orbit: derive one from the goal's absolute position relative to the target.
+			SCameraTargetRelativeState resolvedState = {};
+			if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativeStateFromPosition(
+				outState.target,
+				goal.position,
+				currentState.minDistance,
+				currentState.maxDistance,
+				resolvedState))
+			{
+				return false;
+			}
+
+			outState.orbitUv = resolvedState.orbitUv;
+			outState.distance = resolvedState.distance;
+		}
+
+		// An explicit distance overrides the derived one, but never an explicit orbit distance.
+		if (goal.hasDistance && !goal.hasOrbitState)
+			outState.distance = goal.distance;
+
+		// Always respect the camera's configured dolly range.
+		outState.distance = std::clamp(outState.distance, currentState.minDistance, currentState.maxDistance);
+		return true;
+	}
+
+	/// @brief Rebuild the canonical pose fields from the goal's current path payload.
+ static inline bool applyCanonicalPathGoal(CCameraGoal& goal) + { + if (!(goal.hasPathState && goal.hasTargetPosition)) + return false; + if (!CCameraPathUtilities::isPathStateFinite(goal.pathState)) + return false; + return applyCanonicalPathGoalFields(goal, goal.targetPosition, goal.pathState); + } + + /// @brief Canonicalize whichever typed state fragments are currently present on the goal. + static inline bool applyCanonicalGoalState(CCameraGoal& goal) + { + if (goal.hasPathState) + return applyCanonicalPathGoal(goal); + + if (goal.hasTargetPosition && goal.hasOrbitState) + return applyCanonicalSphericalGoal(goal); + + return true; + } + + /// @brief Return a value-copied goal after canonicalizing its typed state. + static inline CCameraGoal canonicalizeGoal(CCameraGoal goal) + { + applyCanonicalGoalState(goal); + return goal; + } + + /// @brief Check whether every populated scalar and vector stored by the goal is finite. + static inline bool isGoalFinite(const CCameraGoal& goal) + { + if (!hlsl::CCameraMathUtilities::isFiniteVec3(goal.position) || !hlsl::CCameraMathUtilities::isFiniteQuaternion(goal.orientation)) + return false; + if (goal.hasTargetPosition && !hlsl::CCameraMathUtilities::isFiniteVec3(goal.targetPosition)) + return false; + if (goal.hasDistance && !hlsl::CCameraMathUtilities::isFiniteScalar(goal.distance)) + return false; + if (goal.hasOrbitState && (!hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitUv.x) || !hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitUv.y) || !hlsl::CCameraMathUtilities::isFiniteScalar(goal.orbitDistance))) + return false; + if (goal.hasPathState && !CCameraPathUtilities::isPathStateFinite(goal.pathState)) + return false; + if (goal.hasDynamicPerspectiveState && + (!hlsl::CCameraMathUtilities::isFiniteScalar(goal.dynamicPerspectiveState.baseFov) || !hlsl::CCameraMathUtilities::isFiniteScalar(goal.dynamicPerspectiveState.referenceDistance))) + return false; + return true; + } + + /// @brief Compare two goals 
using caller-provided pose and scalar tolerances. + static inline bool compareGoals(const CCameraGoal& actual, const CCameraGoal& expected, + const double posEps, const double rotEpsDeg, const double scalarEps) + { + hlsl::SCameraPoseDelta poseDelta = {}; + if (!hlsl::CCameraMathUtilities::tryComputePoseDelta(actual.position, actual.orientation, expected.position, expected.orientation, poseDelta)) + return false; + if (poseDelta.position > posEps || poseDelta.rotationDeg > rotEpsDeg) + return false; + + if (expected.hasTargetPosition) + { + if (!actual.hasTargetPosition || !hlsl::CCameraMathUtilities::nearlyEqualVec3(actual.targetPosition, expected.targetPosition, scalarEps)) + return false; + } + if (expected.hasDistance) + { + if (!actual.hasDistance || !hlsl::CCameraMathUtilities::nearlyEqualScalar(static_cast(actual.distance), static_cast(expected.distance), scalarEps)) + return false; + } + if (expected.hasOrbitState) + { + if (!actual.hasOrbitState) + return false; + if (hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(expected.orbitUv.x, actual.orbitUv.x) > rotEpsDeg) + return false; + if (hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(expected.orbitUv.y, actual.orbitUv.y) > rotEpsDeg) + return false; + if (!hlsl::CCameraMathUtilities::nearlyEqualScalar(static_cast(actual.orbitDistance), static_cast(expected.orbitDistance), scalarEps)) + return false; + } + if (expected.hasPathState) + { + if (!actual.hasPathState) + return false; + if (!CCameraPathUtilities::pathStatesNearlyEqual(actual.pathState, expected.pathState, CCameraPathUtilities::makePathComparisonThresholds(rotEpsDeg, scalarEps))) + return false; + } + if (expected.hasDynamicPerspectiveState) + { + if (!actual.hasDynamicPerspectiveState) + return false; + if (!hlsl::CCameraMathUtilities::nearlyEqualScalar(static_cast(actual.dynamicPerspectiveState.baseFov), static_cast(expected.dynamicPerspectiveState.baseFov), scalarEps)) + return false; + if 
(!hlsl::CCameraMathUtilities::nearlyEqualScalar(static_cast(actual.dynamicPerspectiveState.referenceDistance), static_cast(expected.dynamicPerspectiveState.referenceDistance), scalarEps)) + return false; + } + + return true; + } + + static inline std::string describeGoalMismatch(const CCameraGoal& actual, const CCameraGoal& expected) + { + std::ostringstream oss; + hlsl::SCameraPoseDelta poseDelta = {}; + const bool hasPoseDelta = hlsl::CCameraMathUtilities::tryComputePoseDelta(actual.position, actual.orientation, expected.position, expected.orientation, poseDelta); + const auto currentOrientation = hlsl::CCameraMathUtilities::normalizeQuaternion(actual.orientation); + const auto expectedOrientation = hlsl::CCameraMathUtilities::normalizeQuaternion(expected.orientation); + oss << "pos_delta=" << (hasPoseDelta ? poseDelta.position : std::numeric_limits::quiet_NaN()) + << " rot_delta_deg=" << (hasPoseDelta ? poseDelta.rotationDeg : std::numeric_limits::quiet_NaN()) + << " current_pos=(" << actual.position.x << "," << actual.position.y << "," << actual.position.z << ")" + << " expected_pos=(" << expected.position.x << "," << expected.position.y << "," << expected.position.z << ")" + << " current_quat=(" << currentOrientation.data.x << "," << currentOrientation.data.y << "," << currentOrientation.data.z << "," << currentOrientation.data.w << ")" + << " expected_quat=(" << expectedOrientation.data.x << "," << expectedOrientation.data.y << "," << expectedOrientation.data.z << "," << expectedOrientation.data.w << ")"; + + if (actual.hasTargetPosition) + { + oss << " target=(" << actual.targetPosition.x << "," << actual.targetPosition.y << "," << actual.targetPosition.z << ")"; + if (actual.hasDistance) + oss << " distance=" << actual.distance; + if (actual.hasOrbitState) + oss << " orbit_u=" << actual.orbitUv.x << " orbit_v=" << actual.orbitUv.y; + } + else if (expected.hasTargetPosition || expected.hasDistance || expected.hasOrbitState) + { + oss << " 
spherical_state=unavailable"; + } + if (actual.hasPathState) + { + oss << " path_s=" << actual.pathState.s + << " path_u=" << actual.pathState.u + << " path_v=" << actual.pathState.v + << " path_roll=" << actual.pathState.roll; + } + else if (expected.hasPathState) + { + oss << " path_state=unavailable"; + } + + if (actual.hasDynamicPerspectiveState) + { + oss << " dynamic_base_fov=" << actual.dynamicPerspectiveState.baseFov + << " dynamic_reference_distance=" << actual.dynamicPerspectiveState.referenceDistance; + } + else if (expected.hasDynamicPerspectiveState) + { + oss << " dynamic_perspective_state=unavailable"; + } + + return oss.str(); + } + + static inline CCameraGoal blendGoals(const CCameraGoal& a, const CCameraGoal& b, double alpha) + { + CCameraGoal blended; + blended.position = a.position + (b.position - a.position) * alpha; + blended.orientation = hlsl::CCameraMathUtilities::slerpQuaternion(a.orientation, b.orientation, static_cast(alpha)); + blended.sourceKind = (a.sourceKind == b.sourceKind) ? a.sourceKind : ICamera::CameraKind::Unknown; + blended.sourceCapabilities = a.sourceCapabilities & b.sourceCapabilities; + blended.sourceGoalStateMask = a.sourceGoalStateMask | b.sourceGoalStateMask; + blended.hasTargetPosition = a.hasTargetPosition || b.hasTargetPosition; + if (blended.hasTargetPosition) + { + const auto ta = a.hasTargetPosition ? a.targetPosition : b.targetPosition; + const auto tb = b.hasTargetPosition ? b.targetPosition : a.targetPosition; + blended.targetPosition = ta + (tb - ta) * alpha; + } + blended.hasDistance = a.hasDistance || b.hasDistance; + if (blended.hasDistance) + { + const float da = a.hasDistance ? a.distance : b.distance; + const float db = b.hasDistance ? b.distance : a.distance; + blended.distance = da + (db - da) * static_cast(alpha); + } + blended.hasOrbitState = a.hasOrbitState || b.hasOrbitState; + if (blended.hasOrbitState) + { + const auto orbitUvA = a.hasOrbitState ? 
a.orbitUv : b.orbitUv; + const auto orbitUvB = b.hasOrbitState ? b.orbitUv : a.orbitUv; + const float da = a.hasOrbitState ? a.orbitDistance : b.orbitDistance; + const float db = b.hasOrbitState ? b.orbitDistance : a.orbitDistance; + + blended.orbitUv = hlsl::float64_t2( + hlsl::CCameraMathUtilities::lerpWrappedAngleRad(orbitUvA.x, orbitUvB.x, alpha), + hlsl::CCameraMathUtilities::lerpWrappedAngleRad(orbitUvA.y, orbitUvB.y, alpha)); + blended.orbitDistance = da + (db - da) * static_cast(alpha); + } + blended.hasDynamicPerspectiveState = a.hasDynamicPerspectiveState || b.hasDynamicPerspectiveState; + if (blended.hasDynamicPerspectiveState) + { + const auto dynamicA = a.hasDynamicPerspectiveState ? a.dynamicPerspectiveState : b.dynamicPerspectiveState; + const auto dynamicB = b.hasDynamicPerspectiveState ? b.dynamicPerspectiveState : a.dynamicPerspectiveState; + blended.dynamicPerspectiveState.baseFov = dynamicA.baseFov + (dynamicB.baseFov - dynamicA.baseFov) * static_cast(alpha); + blended.dynamicPerspectiveState.referenceDistance = + dynamicA.referenceDistance + (dynamicB.referenceDistance - dynamicA.referenceDistance) * static_cast(alpha); + } + blended.hasPathState = a.hasPathState || b.hasPathState; + if (blended.hasPathState) + { + const auto pathA = a.hasPathState ? a.pathState : b.pathState; + const auto pathB = b.hasPathState ? b.pathState : a.pathState; + blended.pathState = CCameraPathUtilities::blendPathStates(pathA, pathB, alpha); + } + return canonicalizeGoal(blended); + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_GOAL_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraGoalAnalysis.hpp b/include/nbl/ext/Cameras/CCameraGoalAnalysis.hpp new file mode 100644 index 0000000000..4653cc3181 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraGoalAnalysis.hpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h
+
+#ifndef _C_CAMERA_GOAL_ANALYSIS_HPP_
+#define _C_CAMERA_GOAL_ANALYSIS_HPP_
+
+#include "CCameraPreset.hpp"
+#include "CCameraGoalSolver.hpp"
+
+namespace nbl::core
+{
+
+/// @brief Reusable typed answer for `goal/preset -> camera` compatibility checks.
+struct SCameraGoalApplyAnalysis
+{
+	// Canonicalized copy of the goal that was analyzed.
+	CCameraGoal goal = {};
+	// Solver verdict; only populated when a camera was supplied (hasCamera == true).
+	CCameraGoalSolver::SCompatibilityResult compatibility = {};
+	bool hasCamera = false;
+	bool finiteGoal = false;
+	// True only when a camera exists AND the goal is finite.
+	bool canApply = false;
+
+	/// @brief True when the solver reports an exact (lossless) match.
+	inline bool exact() const
+	{
+		return compatibility.exact;
+	}
+
+	/// @brief True when at least one typed goal fragment would be dropped by the target camera.
+	inline bool dropsGoalState() const
+	{
+		return compatibility.missingGoalStateMask != ICamera::GoalStateNone;
+	}
+
+	/// @brief True when the kinds differ but the known source kind can still apply without losing state.
+	inline bool usesSharedStateOnly() const
+	{
+		return !compatibility.sameKind && goal.sourceKind != ICamera::CameraKind::Unknown && !dropsGoalState();
+	}
+
+	/// @brief Alias for canApply kept for call-site readability.
+	inline bool isMeaningfulApply() const
+	{
+		return canApply;
+	}
+};
+
+/// @brief Reusable typed answer for `camera -> goal` capture viability.
+struct SCameraCaptureAnalysis +{ + CCameraGoal goal = {}; + bool hasCamera = false; + bool capturedGoal = false; + bool finiteGoal = false; + bool canCapture = false; +}; + +struct CCameraGoalAnalysisUtilities final +{ +public: + static inline SCameraGoalApplyAnalysis analyzeGoalApply(const CCameraGoalSolver& solver, const ICamera* camera, const CCameraGoal& goal) + { + SCameraGoalApplyAnalysis analysis; + analysis.goal = CCameraGoalUtilities::canonicalizeGoal(goal); + analysis.hasCamera = camera != nullptr; + analysis.finiteGoal = CCameraGoalUtilities::isGoalFinite(analysis.goal); + analysis.canApply = analysis.hasCamera && analysis.finiteGoal; + if (analysis.hasCamera) + analysis.compatibility = solver.analyzeCompatibility(camera, analysis.goal); + return analysis; + } + + static inline SCameraGoalApplyAnalysis analyzePresetApply(const CCameraGoalSolver& solver, const ICamera* camera, const CCameraPreset& preset) + { + return analyzeGoalApply(solver, camera, CCameraPresetUtilities::makeGoalFromPreset(preset)); + } + + static inline SCameraCaptureAnalysis analyzeCameraCapture(const CCameraGoalSolver& solver, ICamera* camera) + { + SCameraCaptureAnalysis analysis; + const auto capture = solver.captureDetailed(camera); + analysis.goal = capture.goal; + analysis.hasCamera = capture.hasCamera; + analysis.capturedGoal = capture.captured; + analysis.finiteGoal = capture.finiteGoal; + analysis.canCapture = capture.canUseGoal(); + return analysis; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_GOAL_ANALYSIS_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraGoalSolver.hpp b/include/nbl/ext/Cameras/CCameraGoalSolver.hpp new file mode 100644 index 0000000000..01ca290ed4 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraGoalSolver.hpp @@ -0,0 +1,150 @@ +#ifndef _C_CAMERA_GOAL_SOLVER_HPP_ +#define _C_CAMERA_GOAL_SOLVER_HPP_ + +#include +#include +#include +#include +#include + +#include "CCameraGoal.hpp" +#include "CCameraTargetRelativeUtilities.hpp" +#include 
"CCameraVirtualEventUtilities.hpp" +#include "nbl/core/util/bitflag.h" + +namespace nbl::core +{ + +/// @brief Goal capture, compatibility analysis, and goal application helper. +/// +/// The solver captures canonical state into `CCameraGoal`, compares a goal +/// against one target camera, applies typed fragments directly when the camera +/// exposes them, and builds virtual-event replay when a typed fragment must be +/// approximated through `manipulate(...)`. +class CCameraGoalSolver +{ +public: + /// @brief Detailed result returned by one goal-capture attempt. + struct SCaptureResult + { + bool hasCamera = false; + bool captured = false; + bool finiteGoal = false; + CCameraGoal goal = {}; + + inline bool canUseGoal() const + { + return hasCamera && captured && finiteGoal; + } + }; + + /// @brief Compatibility of a goal with a target camera kind and state mask. + struct SCompatibilityResult + { + bool sameKind = false; + bool exact = false; + ICamera::goal_state_flags_t requiredGoalStateMask = ICamera::GoalStateNone; + ICamera::goal_state_flags_t supportedGoalStateMask = ICamera::GoalStateNone; + ICamera::goal_state_flags_t missingGoalStateMask = ICamera::GoalStateNone; + }; + + /// @brief Outcome of one goal-application attempt. 
+ struct SApplyResult + { + enum class EStatus : uint8_t + { + Unsupported, + Failed, + AlreadySatisfied, + AppliedAbsoluteOnly, + AppliedVirtualEvents, + AppliedAbsoluteAndVirtualEvents + }; + + enum class EIssue : uint32_t + { + NoIssue = 0u, + UsedAbsolutePoseFallback = core::createBitmask({ 0 }), + MissingSphericalTargetState = core::createBitmask({ 1 }), + MissingPathState = core::createBitmask({ 2 }), + MissingDynamicPerspectiveState = core::createBitmask({ 3 }), + VirtualEventReplayFailed = core::createBitmask({ 4 }) + }; + + EStatus status = EStatus::Unsupported; + bool exact = false; + uint32_t eventCount = 0u; + core::bitflag issues = EIssue::NoIssue; + + inline bool succeeded() const + { + return status != EStatus::Unsupported && status != EStatus::Failed; + } + + inline bool changed() const + { + return status == EStatus::AppliedAbsoluteOnly || + status == EStatus::AppliedVirtualEvents || + status == EStatus::AppliedAbsoluteAndVirtualEvents; + } + + inline bool approximate() const + { + return succeeded() && !exact; + } + + inline bool hasIssue(EIssue issue) const + { + return issues.hasFlags(issue); + } + }; + + bool buildEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const; + bool capture(ICamera* camera, CCameraGoal& out) const; + SCaptureResult captureDetailed(ICamera* camera) const; + SCompatibilityResult analyzeCompatibility(const ICamera* camera, const CCameraGoal& target) const; + SApplyResult applyDetailed(ICamera* camera, const CCameraGoal& target) const; + bool apply(ICamera* camera, const CCameraGoal& target) const; + +private: + struct SGoalSolverDefaults final + { + static constexpr double UnitScale = 1.0; + static inline const hlsl::float64_t3 UnitAxisDenominator = hlsl::float64_t3(UnitScale); + static inline const hlsl::float64_t3 ScalarToleranceVec = hlsl::float64_t3(SCameraToolingThresholds::ScalarTolerance); + static inline const hlsl::float64_t3 AngularToleranceDegVec = 
hlsl::float64_t3(SCameraToolingThresholds::DefaultAngularToleranceDeg); + }; + + void appendYawPitchRollEvents( + std::vector& events, + const hlsl::float64_t3& eulerRadians, + double denominator, + bool includeRoll = true) const; + void appendPathDeltaEvents( + std::vector& events, + const SCameraPathDelta& delta, + double moveDenominator, + double rotationDenominator) const; + double getMoveMagnitudeDenominator(const ICamera* camera) const; + double getRotationMagnitudeDenominator(const ICamera* camera) const; + bool computePoseMismatch(ICamera* camera, const CCameraGoal& target, double& outPositionDelta, double& outRotationDeltaDeg) const; + bool tryApplyAbsoluteReferencePose(ICamera* camera, const CCameraGoal& target, bool& outChanged, bool& outExact) const; + bool buildTargetRelativeEvents( + ICamera* camera, + const ICamera::SphericalTargetState& sphericalState, + const SCameraTargetRelativeState& goal, + std::vector& out, + const SCameraTargetRelativeEventPolicy& policy) const; + bool buildPathEvents( + ICamera* camera, + const CCameraGoal& target, + const ICamera::SphericalTargetState& sphericalState, + std::vector& out) const; + bool buildSphericalEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const; + bool buildFreeEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const; +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_GOAL_SOLVER_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraInputBindingUtilities.hpp b/include/nbl/ext/Cameras/CCameraInputBindingUtilities.hpp new file mode 100644 index 0000000000..24daf077c6 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraInputBindingUtilities.hpp @@ -0,0 +1,126 @@ +#ifndef _NBL_C_CAMERA_INPUT_BINDING_UTILITIES_HPP_ +#define _NBL_C_CAMERA_INPUT_BINDING_UTILITIES_HPP_ + +#include +#include + +#include "CCameraKindUtilities.hpp" +#include "ICamera.hpp" +#include "IGimbalBindingLayout.hpp" + +namespace nbl::ui +{ + +/// @brief Reusable keyboard, mouse, and ImGuizmo 
binding preset grouped for one camera kind. +struct SCameraInputBindingPreset +{ + IGimbalBindingLayout::keyboard_to_virtual_events_t keyboard; + IGimbalBindingLayout::mouse_to_virtual_events_t mouse; + IGimbalBindingLayout::imguizmo_to_virtual_events_t imguizmo; +}; + +/// @brief Shared physical input bundles reused by default presets and smoke probing. +struct SCameraInputBindingPhysicalGroups final +{ + static inline constexpr std::array KeyboardWasdCodes = { + ui::E_KEY_CODE::EKC_W, + ui::E_KEY_CODE::EKC_S, + ui::E_KEY_CODE::EKC_A, + ui::E_KEY_CODE::EKC_D + }; + static inline constexpr std::array KeyboardQeCodes = { + ui::E_KEY_CODE::EKC_Q, + ui::E_KEY_CODE::EKC_E + }; + static inline constexpr std::array KeyboardIjklCodes = { + ui::E_KEY_CODE::EKC_I, + ui::E_KEY_CODE::EKC_K, + ui::E_KEY_CODE::EKC_J, + ui::E_KEY_CODE::EKC_L + }; + static inline constexpr auto KeyboardProbeMoveCodes = KeyboardWasdCodes; + static inline constexpr auto KeyboardProbeLookCodes = KeyboardIjklCodes; + static inline constexpr std::array KeyboardProbeExtraCodes = { + ui::E_KEY_CODE::EKC_U, + ui::E_KEY_CODE::EKC_O + }; + static inline constexpr std::array KeyboardProbeCodes = { + ui::E_KEY_CODE::EKC_W, + ui::E_KEY_CODE::EKC_S, + ui::E_KEY_CODE::EKC_A, + ui::E_KEY_CODE::EKC_D, + ui::E_KEY_CODE::EKC_Q, + ui::E_KEY_CODE::EKC_E, + ui::E_KEY_CODE::EKC_I, + ui::E_KEY_CODE::EKC_K, + ui::E_KEY_CODE::EKC_J, + ui::E_KEY_CODE::EKC_L, + ui::E_KEY_CODE::EKC_U, + ui::E_KEY_CODE::EKC_O + }; + static inline constexpr std::array RelativeMouseCodes = { + ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_X, + ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_X, + ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_Y, + ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y + }; + static inline constexpr std::array PositiveScrollCodes = { + ui::E_MOUSE_CODE::EMC_VERTICAL_POSITIVE_SCROLL, + ui::E_MOUSE_CODE::EMC_HORIZONTAL_POSITIVE_SCROLL + }; + static inline constexpr std::array NegativeScrollCodes = { + 
ui::E_MOUSE_CODE::EMC_VERTICAL_NEGATIVE_SCROLL, + ui::E_MOUSE_CODE::EMC_HORIZONTAL_NEGATIVE_SCROLL + }; +}; + +struct CCameraInputBindingUtilities final +{ +public: + /// @brief Default gains used by the shared binding presets. + /// + /// These gains convert producer-local units into virtual-event magnitudes: + /// held keys into units per second, mouse deltas into units per mouse + /// step, scroll into units per scroll step, and ImGuizmo deltas into + /// virtual translation, rotation, or scale magnitudes. + struct SInputMagnitudeDefaults final + { + static inline constexpr double KeyboardHeldUnitsPerSecond = 1000.0; + static inline constexpr double RelativeMouseUnitsPerStep = 1.0; + static inline constexpr double ScrollUnitsPerStep = 1.0; + static inline constexpr double ImguizmoTranslationUnitsPerWorldUnit = 1.0; + static inline constexpr double ImguizmoRotationUnitsPerRadian = 1.0; + static inline constexpr double ImguizmoScaleUnitsPerFactor = 1.0; + }; + + static bool hasMouseRelativeMovementBinding(const IGimbalBindingLayout::mouse_to_virtual_events_t& mousePreset); + + static bool hasMouseScrollBinding(const IGimbalBindingLayout::mouse_to_virtual_events_t& mousePreset); + + static const IGimbalBindingLayout::keyboard_to_virtual_events_t& getDefaultCameraKeyboardMappingPreset(core::ICamera::CameraKind kind); + + static const IGimbalBindingLayout::keyboard_to_virtual_events_t& getDefaultCameraKeyboardMappingPreset(const core::ICamera& camera); + + static const IGimbalBindingLayout::mouse_to_virtual_events_t& getDefaultCameraMouseMappingPreset(core::ICamera::CameraKind kind); + + static const IGimbalBindingLayout::mouse_to_virtual_events_t& getDefaultCameraMouseMappingPreset(const core::ICamera& camera); + + static IGimbalBindingLayout::imguizmo_to_virtual_events_t buildDefaultCameraImguizmoMappingPreset(uint32_t allowedVirtualEvents); + + static IGimbalBindingLayout::imguizmo_to_virtual_events_t buildDefaultCameraImguizmoMappingPreset(const core::ICamera& 
camera); + + static SCameraInputBindingPreset buildDefaultCameraInputBindingPreset(core::ICamera::CameraKind kind, uint32_t allowedVirtualEvents); + + static SCameraInputBindingPreset buildDefaultCameraInputBindingPreset(const core::ICamera& camera); + + static void applyDefaultCameraInputBindingPreset( + IGimbalBindingLayout& layout, + core::ICamera::CameraKind kind, + uint32_t allowedVirtualEvents); + + static void applyDefaultCameraInputBindingPreset(IGimbalBindingLayout& layout, const core::ICamera& camera); +}; + +} // namespace nbl::ui + +#endif // _NBL_C_CAMERA_INPUT_BINDING_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraKeyframeTrack.hpp b/include/nbl/ext/Cameras/CCameraKeyframeTrack.hpp new file mode 100644 index 0000000000..823f2a4c84 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraKeyframeTrack.hpp @@ -0,0 +1,58 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_KEYFRAME_TRACK_HPP_ +#define _C_CAMERA_KEYFRAME_TRACK_HPP_ + +#include +#include +#include + +#include "CCameraPreset.hpp" + +namespace nbl::core +{ + +/// @brief Reusable keyframe container plus selection state for playback tooling. +struct CCameraKeyframeTrack +{ + std::vector keyframes; + int selectedKeyframeIx = -1; +}; + +struct CCameraKeyframeTrackUtilities final +{ +public: + /// @brief Compare two keyframes by authored time and shared preset state. + static bool compareKeyframes(const CCameraKeyframe& lhs, const CCameraKeyframe& rhs, + double timeEps, double posEps, double rotEpsDeg, double scalarEps); + + /// @brief Compare two authored keyframe tracks with optional selection-state checking. 
+ static bool compareKeyframeTracks(const CCameraKeyframeTrack& lhs, const CCameraKeyframeTrack& rhs, + double timeEps, double posEps, double rotEpsDeg, double scalarEps, bool compareSelection = true); + + /// @brief Compare only the serialized/authored content of two tracks and ignore transient UI selection state. + static bool compareKeyframeTrackContent(const CCameraKeyframeTrack& lhs, const CCameraKeyframeTrack& rhs, + double timeEps, double posEps, double rotEpsDeg, double scalarEps); + + static bool tryBuildKeyframeTrackPresetAtTime(const CCameraKeyframeTrack& track, float time, CCameraPreset& preset); + + static void sortKeyframeTrackByTime(CCameraKeyframeTrack& track); + + static void clampTrackTimeToKeyframes(const CCameraKeyframeTrack& track, float& time); + + static int selectKeyframeTrackNearestTime(CCameraKeyframeTrack& track, float time); + + static void normalizeSelectedKeyframeTrack(CCameraKeyframeTrack& track); + + static CCameraKeyframe* getSelectedKeyframe(CCameraKeyframeTrack& track); + + static const CCameraKeyframe* getSelectedKeyframe(const CCameraKeyframeTrack& track); + + static bool replaceSelectedKeyframePreset(CCameraKeyframeTrack& track, CCameraPreset preset); +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_KEYFRAME_TRACK_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraKeyframeTrackPersistence.hpp b/include/nbl/ext/Cameras/CCameraKeyframeTrackPersistence.hpp new file mode 100644 index 0000000000..12c57d7a24 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraKeyframeTrackPersistence.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_KEYFRAME_TRACK_PERSISTENCE_HPP_ +#define _C_CAMERA_KEYFRAME_TRACK_PERSISTENCE_HPP_ + +#include +#include + +#include "CCameraKeyframeTrack.hpp" +#include "nbl/system/path.h" + +namespace nbl::system +{ + +class ISystem; + +struct CCameraKeyframeTrackPersistenceUtilities final +{ + /// @brief Serialize one camera keyframe track to JSON text. + static std::string serializeKeyframeTrack(const core::CCameraKeyframeTrack& track, int indent = 2); + /// @brief Deserialize one camera keyframe track from JSON text. + static bool deserializeKeyframeTrack(std::string_view text, core::CCameraKeyframeTrack& track, std::string* error = nullptr); + + /// @brief Save one camera keyframe track to a file. + static bool saveKeyframeTrackToFile(ISystem& system, const path& path, const core::CCameraKeyframeTrack& track, int indent = 2); + /// @brief Load one camera keyframe track from a file. + static bool loadKeyframeTrackFromFile(ISystem& system, const path& path, core::CCameraKeyframeTrack& track, std::string* error = nullptr); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_KEYFRAME_TRACK_PERSISTENCE_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraKindUtilities.hpp b/include/nbl/ext/Cameras/CCameraKindUtilities.hpp new file mode 100644 index 0000000000..e09fb6ce52 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraKindUtilities.hpp @@ -0,0 +1,140 @@ +#ifndef _C_CAMERA_KIND_UTILITIES_HPP_ +#define _C_CAMERA_KIND_UTILITIES_HPP_ + +#include +#include + +#include "CCameraPathMetadata.hpp" +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Interaction family used to group camera kinds with matching control semantics. +enum class ECameraInteractionFamily : uint8_t +{ + None, + Fps, + Free, + Orbit, + TargetRig, + Turntable, + TopDown, + Path +}; + +/// @brief Shared metadata for one concrete `CameraKind`. 
+struct SCameraKindTraits final +{ + ICamera::CameraKind kind = ICamera::CameraKind::Unknown; + std::string_view label = "Unknown"; + std::string_view description = "Unspecified camera behavior"; + ECameraInteractionFamily interactionFamily = ECameraInteractionFamily::None; +}; + +struct CCameraKindUtilities final +{ +public: + static inline constexpr const SCameraKindTraits& getCameraKindTraits(const ICamera::CameraKind kind) + { + const auto ix = static_cast(kind); + if (ix >= CameraKindTraitsTable.size()) + return CameraKindTraitsTable[0u]; + return CameraKindTraitsTable[ix]; + } + + static inline constexpr std::string_view getCameraKindLabel(const ICamera::CameraKind kind) + { + return getCameraKindTraits(kind).label; + } + + static inline constexpr std::string_view getCameraKindDescription(const ICamera::CameraKind kind) + { + return getCameraKindTraits(kind).description; + } + + static inline constexpr ECameraInteractionFamily getCameraInteractionFamily(const ICamera::CameraKind kind) + { + return getCameraKindTraits(kind).interactionFamily; + } + +private: + static inline constexpr std::array(ICamera::CameraKind::Path) + 1u> CameraKindTraitsTable = {{ + { + .kind = ICamera::CameraKind::Unknown, + .label = "Unknown", + .description = "Unspecified camera behavior", + .interactionFamily = ECameraInteractionFamily::None + }, + { + .kind = ICamera::CameraKind::FPS, + .label = "FPS", + .description = "First-person WASD + mouse look", + .interactionFamily = ECameraInteractionFamily::Fps + }, + { + .kind = ICamera::CameraKind::Free, + .label = "Free", + .description = "Free-fly 6DOF with full rotation", + .interactionFamily = ECameraInteractionFamily::Free + }, + { + .kind = ICamera::CameraKind::Orbit, + .label = "Orbit", + .description = "Orbit around target with dolly", + .interactionFamily = ECameraInteractionFamily::Orbit + }, + { + .kind = ICamera::CameraKind::Arcball, + .label = "Arcball", + .description = "Arcball trackball around target", + 
.interactionFamily = ECameraInteractionFamily::Orbit + }, + { + .kind = ICamera::CameraKind::Turntable, + .label = "Turntable", + .description = "Turntable yaw/pitch around target", + .interactionFamily = ECameraInteractionFamily::Turntable + }, + { + .kind = ICamera::CameraKind::TopDown, + .label = "TopDown", + .description = "Fixed pitch top-down pan", + .interactionFamily = ECameraInteractionFamily::TopDown + }, + { + .kind = ICamera::CameraKind::Isometric, + .label = "Isometric", + .description = "Fixed isometric view with pan", + .interactionFamily = ECameraInteractionFamily::Orbit + }, + { + .kind = ICamera::CameraKind::Chase, + .label = "Chase", + .description = "Target follow with chase controls", + .interactionFamily = ECameraInteractionFamily::TargetRig + }, + { + .kind = ICamera::CameraKind::Dolly, + .label = "Dolly", + .description = "Rig truck/dolly with look-at", + .interactionFamily = ECameraInteractionFamily::TargetRig + }, + { + .kind = ICamera::CameraKind::DollyZoom, + .label = "Dolly Zoom", + .description = "Orbit with dolly-zoom FOV", + .interactionFamily = ECameraInteractionFamily::Orbit + }, + { + .kind = ICamera::CameraKind::Path, + .label = SCameraPathRigMetadata::KindLabel, + .description = SCameraPathRigMetadata::KindDescription, + .interactionFamily = ECameraInteractionFamily::Path + } + }}; +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_KIND_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraManipulationUtilities.hpp b/include/nbl/ext/Cameras/CCameraManipulationUtilities.hpp new file mode 100644 index 0000000000..6d60e93081 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraManipulationUtilities.hpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_MANIPULATION_UTILITIES_HPP_ +#define _C_CAMERA_MANIPULATION_UTILITIES_HPP_ + +#include +#include + +#include "CCameraVirtualEventUtilities.hpp" + +namespace nbl::core +{ + +struct CCameraManipulationUtilities final +{ +public: + /// @brief Apply an authored world-space reference frame through the shared camera runtime entry point. + static inline bool applyReferenceFrameToCamera(ICamera* camera, const hlsl::float64_t4x4& referenceFrame) + { + if (!camera) + return false; + + return camera->manipulateWithUnitMotionScales({}, &referenceFrame); + } + + /// @brief Scale translation and rotation event magnitudes without touching unrelated event types. + static inline void scaleVirtualEvents(std::vector& events, const uint32_t count, const float translationScale, const float rotationScale) + { + for (uint32_t i = 0u; i < count; ++i) + { + auto& ev = events[i]; + if (CVirtualGimbalEvent::isTranslationEvent(ev.type)) + { + ev.magnitude *= translationScale; + } + else if (CVirtualGimbalEvent::isRotationEvent(ev.type)) + { + ev.magnitude *= rotationScale; + } + } + } + + /// @brief Reinterpret world-space translation intents as local camera-space movement events. 
+ static inline void remapTranslationEventsFromWorldToCameraLocal(ICamera* camera, std::vector& events, uint32_t& count) + { + if (!camera) + return; + + std::vector filtered; + filtered.reserve(events.size()); + + for (uint32_t i = 0u; i < count; ++i) + { + const auto& ev = events[i]; + if (!CVirtualGimbalEvent::isTranslationEvent(ev.type)) + filtered.emplace_back(ev); + } + + const auto worldDelta = CCameraVirtualEventUtilities::collectSignedTranslationDelta({ events.data(), count }); + if (hlsl::CCameraMathUtilities::isNearlyZeroVector(worldDelta, static_cast(SCameraToolingThresholds::TinyScalarEpsilon))) + { + events = std::move(filtered); + count = static_cast(events.size()); + return; + } + + const auto scaledTranslationMagnitude = camera->getScaledVirtualTranslationMagnitude(); + CCameraVirtualEventUtilities::appendWorldTranslationAsLocalEvents( + filtered, + camera->getGimbal().getOrientation(), + worldDelta, + hlsl::float64_t3(scaledTranslationMagnitude)); + + events = std::move(filtered); + count = static_cast(events.size()); + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_MANIPULATION_UTILITIES_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraMathUtilities.hpp b/include/nbl/ext/Cameras/CCameraMathUtilities.hpp new file mode 100644 index 0000000000..4e3fbfa112 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraMathUtilities.hpp @@ -0,0 +1,917 @@ +#ifndef _C_CAMERA_MATH_UTILITIES_HPP_ +#define _C_CAMERA_MATH_UTILITIES_HPP_ + +#include +#include +#include +#include + +#include "nbl/builtin/hlsl/cpp_compat/matrix.hlsl" +#include "nbl/builtin/hlsl/cpp_compat/vector.hlsl" +#include "nbl/builtin/hlsl/math/quaternions.hlsl" +#include "nbl/builtin/hlsl/numbers.hlsl" + +namespace nbl::hlsl +{ + +/// @brief Camera-oriented math aliases and helpers built on top of Nabla `nbl::hlsl` types. 
+template +using camera_vector_t = vector; + +template +using camera_matrix_t = matrix; + +template +using camera_quaternion_t = math::quaternion; + +template +struct SRigidTransformComponents +{ + camera_vector_t translation = camera_vector_t(T(0)); + camera_quaternion_t orientation = camera_quaternion_t::create(); + camera_vector_t scale = camera_vector_t(T(1)); +}; + +template +struct SCameraPoseDelta +{ + T position = T(0); + T rotationDeg = T(0); +}; + +struct SCameraViewRigDefaults final +{ + static constexpr double DegreesToRadians = numbers::pi / 180.0; + static constexpr double FullTurnDeg = 360.0; + static constexpr double RightAngleDeg = FullTurnDeg / 4.0; + static constexpr double ArcballPitchMarginDeg = 1.0; + static constexpr double DollyPitchMarginDeg = 5.0; + static constexpr double FpsVerticalPitchMarginDeg = 2.0; + // Arcball and turntable pitch stop short of +-90 deg to avoid a singular up axis. + static constexpr double ArcballPitchLimitDeg = RightAngleDeg - ArcballPitchMarginDeg; + static constexpr double TurntablePitchLimitDeg = ArcballPitchLimitDeg; + // Chase rigs keep a narrower pitch envelope so the followed subject stays readable. + static constexpr double ChaseMaxPitchDeg = 70.0; + static constexpr double ChaseMinPitchDeg = -60.0; + // Dolly and FPS rigs also stop short of straight up/down. + static constexpr double DollyPitchLimitDeg = RightAngleDeg - DollyPitchMarginDeg; + static constexpr double FpsVerticalPitchLimitDeg = RightAngleDeg - FpsVerticalPitchMarginDeg; + static constexpr double TopDownPitchDeg = -RightAngleDeg; + // Half of a right angle is the canonical isometric azimuth. + static constexpr double IsometricYawDeg = RightAngleDeg / 2.0; + // tan(theta) = 1 / sqrt(2) for the canonical isometric pitch. + static constexpr double IsometricPitchTangent = 1.0 / numbers::sqrt2; + // atan(1 / sqrt(2)) is the canonical isometric pitch used by the fixed rig. 
+ static inline const double IsometricPitchRad = std::atan(IsometricPitchTangent); + static inline const double IsometricPitchDeg = IsometricPitchRad / DegreesToRadians; + + static inline constexpr double ArcballPitchLimitRad = ArcballPitchLimitDeg * DegreesToRadians; + static inline constexpr double TurntablePitchLimitRad = TurntablePitchLimitDeg * DegreesToRadians; + static inline constexpr double ChaseMaxPitchRad = ChaseMaxPitchDeg * DegreesToRadians; + static inline constexpr double ChaseMinPitchRad = ChaseMinPitchDeg * DegreesToRadians; + static inline constexpr double DollyPitchLimitRad = DollyPitchLimitDeg * DegreesToRadians; + static inline constexpr double FpsVerticalPitchLimitRad = FpsVerticalPitchLimitDeg * DegreesToRadians; + static inline constexpr double TopDownPitchRad = TopDownPitchDeg * DegreesToRadians; + static inline constexpr double IsometricYawRad = IsometricYawDeg * DegreesToRadians; +}; + +struct SCameraRigidMathDefaults final +{ + // Treat vectors as effectively parallel once the normalized dot product stays within 1e-2 of 1. 
+ static constexpr double LookAtParallelThreshold = 1.0 - 1e-2; +}; + +struct CCameraMathUtilities final +{ + template + static inline vector castVector(const vector& input) + { + vector output; + for (uint32_t i = 0u; i < N; ++i) + output[i] = static_cast(input[i]); + return output; + } + + template + static inline matrix promoteAffine3x4To4x4(const matrix& input) + { + matrix output; + output[0] = input[0]; + output[1] = input[1]; + output[2] = input[2]; + output[3] = vector(T(0), T(0), T(0), T(1)); + return output; + } + + template + static inline bool isOrthoBase(const Vec& x, const Vec& y, const Vec& z, const E epsilon = 1e-6) + { + const auto isNormalized = [&](const auto& v) -> bool + { + return hlsl::abs(hlsl::length(v) - static_cast(1.0)) <= epsilon; + }; + + const auto isOrthogonal = [&](const auto& a, const auto& b) -> bool + { + return hlsl::abs(hlsl::dot(a, b)) <= epsilon; + }; + + return isNormalized(x) && isNormalized(y) && isNormalized(z) && + isOrthogonal(x, y) && isOrthogonal(x, z) && isOrthogonal(y, z); + } + + template + static inline T wrapAngleRad(T angle) + { + constexpr T Pi = numbers::pi; + constexpr T TwoPi = Pi * static_cast(2); + + angle = std::fmod(angle + Pi, TwoPi); + if (angle < static_cast(0)) + angle += TwoPi; + return angle - Pi; + } + + template + static inline T getWrappedAngleDistanceRadians(const T a, const T b) + { + return hlsl::abs(wrapAngleRad(a - b)); + } + + template + static inline T getWrappedAngleDistanceDegrees(const T a, const T b) + { + constexpr T HalfTurn = static_cast(180); + constexpr T FullTurn = static_cast(360); + + T angle = std::fmod(a - b + HalfTurn, FullTurn); + if (angle < static_cast(0)) + angle += FullTurn; + return hlsl::abs(angle - HalfTurn); + } + + template + static inline T lerpWrappedAngleRad(const T a, const T b, const T alpha) + { + return a + wrapAngleRad(b - a) * alpha; + } + + template + static inline bool isFiniteScalar(const T value) + { + return std::isfinite(value); + } + + template + 
static inline constexpr T getCameraMathEpsilon() + { + return std::numeric_limits::epsilon(); + } + + template + static inline bool nearlyEqualScalar(const T a, const T b, const T epsilon) + { + return hlsl::abs(a - b) <= epsilon; + } + + template + static inline bool isNearlyZeroScalar(const T value, const T epsilon = getCameraMathEpsilon()) + { + return hlsl::abs(value) <= epsilon; + } + + template + static inline bool isNearlyZeroVector(const camera_vector_t& value, const T epsilon = getCameraMathEpsilon()) + { + return length(value) <= epsilon; + } + + template + static inline bool hasPlanarDeltaXY(const camera_vector_t& value, const T epsilon = std::numeric_limits::epsilon()) + { + return !isNearlyZeroVector(camera_vector_t(value.x, value.y), epsilon); + } + + template + static inline bool nearlyEqualVec3(const VecA& a, const VecB& b, const T epsilon) + { + const camera_vector_t delta( + static_cast(a.x - b.x), + static_cast(a.y - b.y), + static_cast(a.z - b.z)); + return length(delta) <= epsilon; + } + + template + static inline constexpr camera_vector_t getCameraWorldRight() + { + return camera_vector_t(T(1), T(0), T(0)); + } + + template + static inline constexpr camera_vector_t getCameraWorldUp() + { + return camera_vector_t(T(0), T(1), T(0)); + } + + template + static inline constexpr camera_vector_t getCameraWorldForward() + { + return camera_vector_t(T(0), T(0), T(1)); + } + + template + static inline constexpr T getCameraLookAtParallelThreshold() + { + return static_cast(SCameraRigidMathDefaults::LookAtParallelThreshold); + } + + template + static inline camera_quaternion_t makeIdentityQuaternion() + { + return camera_quaternion_t::create(); + } + + template + static inline camera_quaternion_t makeQuaternionFromComponents(const T x, const T y, const T z, const T w) + { + camera_quaternion_t output; + output.data = camera_vector_t(x, y, z, w); + return output; + } + + template + static inline camera_quaternion_t normalizeQuaternion(const 
camera_quaternion_t& q) + { + return normalize(q); + } + + template + static inline bool isFiniteQuaternion(const camera_quaternion_t& q) + { + return isFiniteScalar(q.data.x) && + isFiniteScalar(q.data.y) && + isFiniteScalar(q.data.z) && + isFiniteScalar(q.data.w); + } + + template + static inline bool isFiniteVec3(const camera_vector_t& value) + { + return isFiniteScalar(value.x) && + isFiniteScalar(value.y) && + isFiniteScalar(value.z); + } + + template + static inline camera_vector_t safeNormalizeVec3(const camera_vector_t& value, const camera_vector_t& fallback) + { + const auto len = length(value); + if (!isFiniteScalar(len) || len <= getCameraMathEpsilon()) + return fallback; + return value / len; + } + + template + static inline camera_quaternion_t makeQuaternionFromAxisAngle(const camera_vector_t& axis, const T radians) + { + return camera_quaternion_t::create(axis, radians); + } + + template + static inline camera_quaternion_t makeQuaternionFromEulerRadians(const camera_vector_t& eulerRadians) + { + return camera_quaternion_t::create(eulerRadians.x, eulerRadians.y, eulerRadians.z); + } + + template + static inline camera_quaternion_t makeQuaternionFromEulerDegrees(const camera_vector_t& eulerDegrees) + { + return makeQuaternionFromEulerRadians(camera_vector_t( + radians(eulerDegrees.x), + radians(eulerDegrees.y), + radians(eulerDegrees.z))); + } + + template + static inline camera_quaternion_t makeQuaternionFromEulerRadiansYXZ(const camera_vector_t& eulerRadians) + { + const auto pitch = makeQuaternionFromAxisAngle(getCameraWorldRight(), eulerRadians.x); + const auto yaw = makeQuaternionFromAxisAngle(getCameraWorldUp(), eulerRadians.y); + const auto roll = makeQuaternionFromAxisAngle(getCameraWorldForward(), eulerRadians.z); + return normalizeQuaternion(yaw * pitch * roll); + } + + template + static inline camera_quaternion_t makeQuaternionFromEulerDegreesYXZ(const camera_vector_t& eulerDegrees) + { + return 
makeQuaternionFromEulerRadiansYXZ(camera_vector_t( + radians(eulerDegrees.x), + radians(eulerDegrees.y), + radians(eulerDegrees.z))); + } + + template + static inline camera_quaternion_t makeQuaternionFromBasis( + const camera_vector_t& right, + const camera_vector_t& up, + const camera_vector_t& forward) + { + const auto canonicalForward = safeNormalizeVec3(forward, getCameraWorldForward()); + + auto canonicalRight = right - canonicalForward * dot(right, canonicalForward); + canonicalRight = safeNormalizeVec3( + canonicalRight, + safeNormalizeVec3(cross(up, canonicalForward), getCameraWorldRight())); + + auto canonicalUp = cross(canonicalForward, canonicalRight); + canonicalUp = safeNormalizeVec3( + canonicalUp, + safeNormalizeVec3(up - canonicalForward * dot(up, canonicalForward), getCameraWorldUp())); + + canonicalRight = safeNormalizeVec3(cross(canonicalUp, canonicalForward), canonicalRight); + canonicalUp = safeNormalizeVec3(cross(canonicalForward, canonicalRight), canonicalUp); + static_assert(std::is_same_v || std::is_same_v, "Camera basis conversion is only implemented for float and double."); + + if constexpr (std::is_same_v) + return makeQuaternionFromBasisImpl(canonicalRight, canonicalUp, canonicalForward); + else + return makeQuaternionFromBasisImpl(canonicalRight, canonicalUp, canonicalForward); + } + + template + static inline bool tryBuildCameraBasisFromForwardUpHint( + const camera_vector_t& forwardHint, + const camera_vector_t& upHint, + camera_vector_t& outRight, + camera_vector_t& outUp, + camera_vector_t& outForward) + { + const auto forward = safeNormalizeVec3(forwardHint, getCameraWorldForward()); + if (!isFiniteVec3(forward) || isNearlyZeroVector(forward)) + return false; + + const auto preferredUp = safeNormalizeVec3(upHint, getCameraWorldForward()); + auto right = cross(preferredUp, forward); + if (!isFiniteVec3(right) || isNearlyZeroVector(right)) + { + const auto fallbackUp = hlsl::abs(forward.z) < getCameraLookAtParallelThreshold() ? 
+ getCameraWorldForward() : + getCameraWorldUp(); + right = cross(fallbackUp, forward); + if (!isFiniteVec3(right) || isNearlyZeroVector(right)) + return false; + } + + right = safeNormalizeVec3(right, getCameraWorldRight()); + auto up = safeNormalizeVec3(cross(forward, right), preferredUp); + right = safeNormalizeVec3(cross(up, forward), right); + if (!isOrthoBase(right, up, forward)) + return false; + + outRight = right; + outUp = up; + outForward = forward; + return true; + } + + template + static inline camera_vector_t makeSphericalOffsetFromOrbit(const camera_vector_t& orbitUv, const T distance) + { + return camera_vector_t( + hlsl::cos(orbitUv.y) * hlsl::cos(orbitUv.x) * distance, + hlsl::cos(orbitUv.y) * hlsl::sin(orbitUv.x) * distance, + hlsl::sin(orbitUv.y) * distance); + } + + template + static inline T getPlanarRadiusXZ(const camera_vector_t& offset) + { + return length(camera_vector_t(offset.x, offset.z)); + } + + template + static inline T getPathDistance(const T pathU, const T pathV) + { + return length(camera_vector_t(pathU, pathV)); + } + + template + static inline camera_vector_t makePathOffsetFromState(const T pathS, const T pathU, const T pathV) + { + return camera_vector_t(hlsl::cos(pathS) * pathU, pathV, hlsl::sin(pathS) * pathU); + } + + template + static inline bool sanitizePathState(T& pathS, T& pathU, T& pathV, T& pathRoll, const T minU) + { + if (!isFiniteScalar(pathS) || !isFiniteScalar(pathU) || !isFiniteScalar(pathV) || !isFiniteScalar(pathRoll)) + return false; + + pathS = wrapAngleRad(pathS); + pathU = std::max(minU, pathU); + pathRoll = wrapAngleRad(pathRoll); + return isFiniteScalar(pathS) && + isFiniteScalar(pathU) && + isFiniteScalar(pathV) && + isFiniteScalar(pathRoll); + } + + template + static inline bool tryScalePathStateDistance( + const T desiredDistance, + const T minU, + T& pathU, + T& pathV, + T* outAppliedDistance = nullptr) + { + if (!isFiniteScalar(desiredDistance) || + !isFiniteScalar(pathU) || + 
!isFiniteScalar(pathV)) + return false; + + const T currentDistance = getPathDistance(pathU, pathV); + constexpr T Epsilon = std::numeric_limits::epsilon(); + if (currentDistance > Epsilon) + { + const T scale = desiredDistance / currentDistance; + pathU = std::max(minU, pathU * scale); + pathV *= scale; + } + else + { + pathU = std::max(minU, desiredDistance); + pathV = T(0); + } + + if (outAppliedDistance) + *outAppliedDistance = getPathDistance(pathU, pathV); + return isFiniteScalar(pathU) && isFiniteScalar(pathV); + } + + template + static inline bool tryBuildPathStateFromPosition( + const camera_vector_t& targetPosition, + const camera_vector_t& position, + const T minRadius, + T& outS, + T& outU, + T& outV) + { + const auto offset = position - targetPosition; + const auto radius = getPlanarRadiusXZ(offset); + if (!isFiniteScalar(radius) || !isFiniteScalar(offset.y)) + return false; + + outS = wrapAngleRad(hlsl::atan2(offset.z, offset.x)); + outU = std::max(minRadius, radius); + outV = offset.y; + return isFiniteScalar(outS) && + isFiniteScalar(outU) && + isFiniteScalar(outV); + } + + template + static inline bool tryBuildLookAtOrientation( + const camera_vector_t& position, + const camera_vector_t& targetPosition, + const camera_vector_t& preferredUp, + camera_quaternion_t& outOrientation) + { + const auto toTarget = targetPosition - position; + camera_vector_t right = camera_vector_t(T(0)); + camera_vector_t up = camera_vector_t(T(0)); + camera_vector_t forward = camera_vector_t(T(0)); + if (!tryBuildCameraBasisFromForwardUpHint(toTarget, preferredUp, right, up, forward)) + return false; + + outOrientation = makeQuaternionFromBasis(right, up, forward); + return true; + } + + template + static inline bool tryExtractRigidPoseFromTransform( + const camera_matrix_t& transform, + camera_vector_t& outTranslation, + camera_quaternion_t& outOrientation) + { + SRigidTransformComponents components; + if (!tryExtractRigidTransformComponents(transform, components)) + 
return false; + + outTranslation = components.translation; + outOrientation = components.orientation; + return true; + } + + template + static inline bool tryBuildSphericalPoseFromOrbit( + const camera_vector_t& targetPosition, + const camera_vector_t& orbitUv, + const T distance, + const T minDistance, + const T maxDistance, + camera_vector_t& outPosition, + camera_quaternion_t& outOrientation, + T* outAppliedDistance = nullptr) + { + if (!isFiniteScalar(orbitUv.x) || + !isFiniteScalar(orbitUv.y) || + !isFiniteScalar(distance)) + return false; + + const T appliedDistance = std::clamp(distance, minDistance, maxDistance); + const auto spherePosition = makeSphericalOffsetFromOrbit(orbitUv, appliedDistance); + const auto upHint = safeNormalizeVec3( + camera_vector_t( + -hlsl::sin(orbitUv.y) * hlsl::cos(orbitUv.x), + -hlsl::sin(orbitUv.y) * hlsl::sin(orbitUv.x), + hlsl::cos(orbitUv.y)), + getCameraWorldForward()); + camera_vector_t right = camera_vector_t(T(0)); + camera_vector_t up = camera_vector_t(T(0)); + camera_vector_t forward = camera_vector_t(T(0)); + if (!tryBuildCameraBasisFromForwardUpHint(-spherePosition, upHint, right, up, forward)) + return false; + + outPosition = targetPosition + spherePosition; + outOrientation = makeQuaternionFromBasis(right, up, forward); + if (outAppliedDistance) + *outAppliedDistance = appliedDistance; + return true; + } + + template + static inline bool tryBuildOrbitFromPosition( + const camera_vector_t& targetPosition, + const camera_vector_t& position, + const T minDistance, + const T maxDistance, + camera_vector_t& outOrbitUv, + T& outDistance) + { + const auto offset = position - targetPosition; + const auto distance = length(offset); + if (!isFiniteScalar(distance) || distance <= getCameraMathEpsilon()) + return false; + + outDistance = std::clamp(distance, minDistance, maxDistance); + const auto local = offset / outDistance; + outOrbitUv = camera_vector_t( + hlsl::atan2(local.y, local.x), + hlsl::asin(std::clamp(local.z, 
T(-1), T(1)))); + return isFiniteScalar(outOrbitUv.x) && + isFiniteScalar(outOrbitUv.y) && + isFiniteScalar(outDistance); + } + + template + static inline camera_vector_t getPitchYawFromForwardVector(const camera_vector_t& forward) + { + const T planarLength = length(camera_vector_t(forward.x, forward.z)); + return camera_vector_t( + hlsl::atan2(planarLength, forward.y) - numbers::pi * T(0.5), + hlsl::atan2(forward.x, forward.z)); + } + + template + static inline camera_vector_t getPitchYawFromOrientation(const camera_quaternion_t& orientation) + { + const auto forward = normalizeQuaternion(orientation).transformVector(camera_vector_t(T(0), T(0), T(1)), true); + return getPitchYawFromForwardVector(forward); + } + + template + static inline bool tryBuildPathPoseFromState( + const camera_vector_t& targetPosition, + const T pathS, + const T pathU, + const T pathV, + const T pathRoll, + const T minRadius, + const T minDistance, + const T maxDistance, + camera_vector_t& outPosition, + camera_quaternion_t& outOrientation, + T* outAppliedDistance = nullptr, + camera_vector_t* outOrbitUv = nullptr) + { + if (!isFiniteScalar(pathS) || + !isFiniteScalar(pathU) || + !isFiniteScalar(pathV) || + !isFiniteScalar(pathRoll)) + return false; + + const T appliedU = std::max(minRadius, pathU); + const auto offset = makePathOffsetFromState(pathS, appliedU, pathV); + + camera_vector_t orbitUv = camera_vector_t(T(0)); + T distance = T(0); + if (!tryBuildOrbitFromPosition(targetPosition, targetPosition + offset, minDistance, maxDistance, orbitUv, distance)) + return false; + if (!tryBuildSphericalPoseFromOrbit(targetPosition, orbitUv, distance, minDistance, maxDistance, outPosition, outOrientation, &distance)) + return false; + + if (!isNearlyZeroScalar(pathRoll, std::numeric_limits::epsilon())) + { + const auto basis = getQuaternionBasisMatrix(outOrientation); + const T rollCos = hlsl::cos(pathRoll); + const T rollSin = hlsl::sin(pathRoll); + const auto right = basis[0u] * rollCos + 
basis[1u] * rollSin; + const auto up = basis[1u] * rollCos - basis[0u] * rollSin; + outOrientation = makeQuaternionFromBasis(right, up, basis[2u]); + } + + if (outAppliedDistance) + *outAppliedDistance = distance; + if (outOrbitUv) + *outOrbitUv = orbitUv; + return true; + } + + template + static inline camera_vector_t rotateVectorByQuaternion(const camera_quaternion_t& orientation, const camera_vector_t& vectorToRotate) + { + return normalizeQuaternion(orientation).transformVector(vectorToRotate, true); + } + + template + static inline camera_vector_t projectWorldVectorToLocalBasis( + const camera_vector_t& worldVector, + const camera_vector_t& right, + const camera_vector_t& up, + const camera_vector_t& forward) + { + const camera_matrix_t basis { right, up, forward }; + return hlsl::mul(hlsl::transpose(basis), worldVector); + } + + template + static inline camera_vector_t transformLocalVectorToWorldBasis( + const camera_vector_t& localVector, + const camera_vector_t& right, + const camera_vector_t& up, + const camera_vector_t& forward) + { + const camera_matrix_t basis { right, up, forward }; + return hlsl::mul(basis, localVector); + } + + template + static inline camera_vector_t getQuaternionEulerRadians(const camera_quaternion_t& orientation) + { + const auto q = normalizeQuaternion(orientation); + const T x = q.data.x; + const T y = q.data.y; + const T z = q.data.z; + const T w = q.data.w; + + const T pitch = hlsl::atan2( + T(2) * (y * z + w * x), + w * w - x * x - y * y + z * z); + const T yaw = hlsl::asin(std::clamp( + T(-2) * (x * z - w * y), + T(-1), + T(1))); + const T roll = hlsl::atan2( + T(2) * (x * y + w * z), + w * w + x * x - y * y - z * z); + + return camera_vector_t(pitch, yaw, roll); + } + + template + static inline camera_vector_t getQuaternionEulerDegrees(const camera_quaternion_t& orientation) + { + const auto eulerRadians = getQuaternionEulerRadians(orientation); + return camera_vector_t( + degrees(eulerRadians.x), + degrees(eulerRadians.y), 
+ degrees(eulerRadians.z)); + } + + template + static inline T getQuaternionAngularDistanceRadians(const camera_quaternion_t& lhs, const camera_quaternion_t& rhs) + { + const auto lhsNormalized = normalizeQuaternion(lhs); + const auto rhsNormalized = normalizeQuaternion(rhs); + const T orientationDot = std::clamp( + static_cast(hlsl::abs(dot(lhsNormalized.data, rhsNormalized.data))), + T(0), + T(1)); + return T(2) * hlsl::acos(orientationDot); + } + + template + static inline T getQuaternionAngularDistanceDegrees(const camera_quaternion_t& lhs, const camera_quaternion_t& rhs) + { + return degrees(getQuaternionAngularDistanceRadians(lhs, rhs)); + } + + template + static inline bool tryComputePoseDelta( + const camera_vector_t& lhsPosition, + const camera_quaternion_t& lhsOrientation, + const camera_vector_t& rhsPosition, + const camera_quaternion_t& rhsOrientation, + SCameraPoseDelta& outDelta) + { + outDelta = {}; + + const auto lhsNormalized = normalizeQuaternion(lhsOrientation); + const auto rhsNormalized = normalizeQuaternion(rhsOrientation); + if (!isFiniteVec3(lhsPosition) || !isFiniteVec3(rhsPosition) || + !isFiniteQuaternion(lhsNormalized) || !isFiniteQuaternion(rhsNormalized)) + { + return false; + } + + outDelta.position = length(lhsPosition - rhsPosition); + outDelta.rotationDeg = getQuaternionAngularDistanceDegrees(lhsNormalized, rhsNormalized); + return isFiniteScalar(outDelta.position) && isFiniteScalar(outDelta.rotationDeg); + } + + template + static inline camera_quaternion_t slerpQuaternion(const camera_quaternion_t& lhs, const camera_quaternion_t& rhs, const T alpha) + { + return camera_quaternion_t::slerp(normalizeQuaternion(lhs), normalizeQuaternion(rhs), alpha); + } + + template + static inline camera_quaternion_t inverseQuaternion(const camera_quaternion_t& q) + { + return inverse(q); + } + + template + static inline camera_vector_t projectWorldVectorToLocalQuaternionFrame( + const camera_quaternion_t& orientation, + const camera_vector_t& 
worldVector) + { + return rotateVectorByQuaternion(inverseQuaternion(orientation), worldVector); + } + + template + static inline camera_matrix_t getQuaternionBasisMatrix(const camera_quaternion_t& orientation) + { + const auto normalizedOrientation = normalizeQuaternion(orientation); + return camera_matrix_t( + normalizedOrientation.transformVector(getCameraWorldRight(), true), + normalizedOrientation.transformVector(getCameraWorldUp(), true), + normalizedOrientation.transformVector(getCameraWorldForward(), true)); + } + + template + static inline camera_vector_t getQuaternionEulerRadiansYXZ(const camera_quaternion_t& orientation) + { + const auto basis = getQuaternionBasisMatrix(orientation); + const T yaw = hlsl::atan2(basis[2][0], basis[2][2]); + const T c2 = hlsl::length(camera_vector_t(basis[0][1], basis[1][1])); + const T pitch = hlsl::atan2(-basis[2][1], c2); + const T s1 = hlsl::sin(yaw); + const T c1 = hlsl::cos(yaw); + const T roll = hlsl::atan2( + s1 * basis[1][2] - c1 * basis[1][0], + c1 * basis[0][0] - s1 * basis[0][2]); + return camera_vector_t(pitch, yaw, roll); + } + + template + static inline camera_vector_t getQuaternionEulerDegreesYXZ(const camera_quaternion_t& orientation) + { + const auto eulerRadians = getQuaternionEulerRadiansYXZ(orientation); + return camera_vector_t( + degrees(eulerRadians.x), + degrees(eulerRadians.y), + degrees(eulerRadians.z)); + } + + template + static inline camera_vector_t getCameraOrientationEulerRadians(const camera_quaternion_t& orientation) + { + return getQuaternionEulerRadiansYXZ(orientation); + } + + template + static inline camera_vector_t getCameraOrientationEulerDegrees(const camera_quaternion_t& orientation) + { + return getQuaternionEulerDegreesYXZ(orientation); + } + + template + static inline camera_vector_t getOrientationDeltaEulerRadiansYXZ( + const camera_quaternion_t& from, + const camera_quaternion_t& to) + { + const auto deltaQuat = inverseQuaternion(from) * normalizeQuaternion(to); + return 
getQuaternionEulerRadiansYXZ(deltaQuat); + } + + template + static inline camera_vector_t getWrappedEulerDistanceDegrees( + const camera_vector_t& a, + const camera_vector_t& b) + { + return camera_vector_t( + getWrappedAngleDistanceDegrees(a.x, b.x), + getWrappedAngleDistanceDegrees(a.y, b.y), + getWrappedAngleDistanceDegrees(a.z, b.z)); + } + + template + static inline T getMaxVectorComponent(const camera_vector_t& value) + { + return std::max(value.x, std::max(value.y, value.z)); + } + + template + static inline camera_matrix_t composeTransformMatrix( + const camera_vector_t& translation, + const camera_quaternion_t& orientation, + const camera_vector_t& scale = camera_vector_t(T(1))) + { + camera_matrix_t output = camera_matrix_t(1); + const auto basis = getQuaternionBasisMatrix(orientation); + output[0] = camera_vector_t(basis[0] * scale.x, T(0)); + output[1] = camera_vector_t(basis[1] * scale.y, T(0)); + output[2] = camera_vector_t(basis[2] * scale.z, T(0)); + output[3] = camera_vector_t(translation, T(1)); + return output; + } + + template + static inline bool tryExtractRigidTransformComponents( + const camera_matrix_t& transform, + SRigidTransformComponents& outComponents) + { + outComponents.translation = camera_vector_t(transform[3].x, transform[3].y, transform[3].z); + + auto right = camera_vector_t(transform[0].x, transform[0].y, transform[0].z); + auto up = camera_vector_t(transform[1].x, transform[1].y, transform[1].z); + auto forward = camera_vector_t(transform[2].x, transform[2].y, transform[2].z); + + outComponents.scale = camera_vector_t(length(right), length(up), length(forward)); + + if (!isFiniteVec3(outComponents.translation) || !isFiniteVec3(outComponents.scale)) + return false; + + constexpr T Epsilon = std::numeric_limits::epsilon(); + if (outComponents.scale.x <= Epsilon || outComponents.scale.y <= Epsilon || outComponents.scale.z <= Epsilon) + return false; + + right /= outComponents.scale.x; + up /= outComponents.scale.y; + forward /= 
outComponents.scale.z; + if (!isOrthoBase(right, up, forward)) + return false; + + outComponents.orientation = makeQuaternionFromBasis(right, up, forward); + return isFiniteQuaternion(outComponents.orientation); + } + + template + static inline bool tryBuildRigidFrameFromTransform( + const camera_matrix_t& transform, + camera_matrix_t& outFrame, + camera_quaternion_t& outOrientation) + { + SRigidTransformComponents components; + if (!tryExtractRigidTransformComponents(transform, components)) + return false; + + outOrientation = components.orientation; + outFrame = composeTransformMatrix(components.translation, components.orientation); + return true; + } + + template + static inline bool decomposeTransformMatrix( + const camera_matrix_t& transform, + camera_vector_t& outTranslation, + camera_vector_t& outRotationEulerDegrees, + camera_vector_t& outScale) + { + SRigidTransformComponents components; + if (!tryExtractRigidTransformComponents(transform, components)) + return false; + + outTranslation = components.translation; + outScale = components.scale; + outRotationEulerDegrees = getCameraOrientationEulerDegrees(components.orientation); + return isFiniteVec3(outRotationEulerDegrees); + } + + static camera_quaternion_t makeQuaternionFromBasisImpl( + const camera_vector_t& right, + const camera_vector_t& up, + const camera_vector_t& forward); + + static camera_quaternion_t makeQuaternionFromBasisImpl( + const camera_vector_t& right, + const camera_vector_t& up, + const camera_vector_t& forward); +}; + +} // namespace nbl::hlsl + +#endif // _C_CAMERA_MATH_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPathMetadata.hpp b/include/nbl/ext/Cameras/CCameraPathMetadata.hpp new file mode 100644 index 0000000000..0ccf2ac6ed --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPathMetadata.hpp @@ -0,0 +1,30 @@ +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PATH_METADATA_HPP_ +#define _C_CAMERA_PATH_METADATA_HPP_ + +#include + +namespace nbl::core +{ + +/// @brief Stable descriptive strings used by the reusable `Path Rig` camera kind. +/// +/// This metadata lives in a lightweight header so code that only needs labels +/// or identifiers does not have to include the full path-model implementation. +struct SCameraPathRigMetadata final +{ + /// @brief User-facing camera kind label. + static inline constexpr std::string_view KindLabel = "Path Rig"; + /// @brief Short user-facing description of the camera kind. + static inline constexpr std::string_view KindDescription = "Path-model camera with typed s/u/v/roll state"; + /// @brief Default runtime identifier used by the concrete camera instance. + static inline constexpr std::string_view Identifier = "Target-relative Path Rig"; + /// @brief Default description of the built-in path model shipped by the shared API. + static inline constexpr std::string_view DefaultModelDescription = "Adjust a target-relative path rig with s/u/v/roll state"; +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PATH_METADATA_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPathUtilities.hpp b/include/nbl/ext/Cameras/CCameraPathUtilities.hpp new file mode 100644 index 0000000000..42387f5fe1 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPathUtilities.hpp @@ -0,0 +1,308 @@ +#ifndef _C_CAMERA_PATH_UTILITIES_HPP_ +#define _C_CAMERA_PATH_UTILITIES_HPP_ + +#include +#include +#include +#include +#include +#include + +#include "CCameraPathMetadata.hpp" +#include "CCameraTargetRelativeUtilities.hpp" +#include "CCameraVirtualEventUtilities.hpp" +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Shared helpers for the reusable `PathRig` camera kind. 
+struct SCameraPathPose final : SCameraRigPose +{ + /// @brief Final radial distance actually applied after clamping and path-state sanitization. + hlsl::float64_t appliedDistance = 0.0; + /// @brief Canonical orbit yaw/pitch derived from the evaluated path state. + hlsl::float64_t2 orbitUv = hlsl::float64_t2(0.0); +}; + +/// @brief Typed delta applied to `ICamera::PathState`. +struct SCameraPathDelta final : ICamera::PathState +{ + /// @brief Pack the delta into one four-component vector. + inline hlsl::float64_t4 asVector() const + { + return ICamera::PathState::asVector(); + } + + /// @brief Reinterpret the delta as the translation-style helper representation. + inline hlsl::float64_t3 translationVector() const + { + return ICamera::PathState::asTranslationVector(); + } + + /// @brief Rebuild the delta from the packed vector representation. + static inline SCameraPathDelta fromVector(const hlsl::float64_t4& value) + { + SCameraPathDelta delta = {}; + delta.s = value.x; + delta.u = value.y; + delta.v = value.z; + delta.roll = value.w; + return delta; + } + + /// @brief Rebuild the delta from a translation-style helper vector and optional roll value. + static inline SCameraPathDelta fromMotion(const hlsl::float64_t3& translation, const double pathRoll = 0.0) + { + SCameraPathDelta delta = {}; + delta.s = translation.z; + delta.u = translation.x; + delta.v = translation.y; + delta.roll = pathRoll; + return delta; + } +}; + +/// @brief One desired path-state change expressed as current state, desired state, and their delta. +struct SCameraPathStateTransition final +{ + ICamera::PathState current = {}; + ICamera::PathState desired = {}; + SCameraPathDelta delta = {}; +}; + +/// @brief Canonical evaluated path state combining a final pose and target-relative view of that pose. 
+struct SCameraCanonicalPathState final +{ + SCameraPathPose pose = {}; + SCameraTargetRelativeState targetRelative = {}; +}; + +/// @brief Comparison tolerances used when matching two path states. +struct SCameraPathComparisonThresholds final +{ + double sToleranceDeg = SCameraToolingThresholds::DefaultAngularToleranceDeg; + double rollToleranceDeg = SCameraToolingThresholds::DefaultAngularToleranceDeg; + double scalarTolerance = SCameraToolingThresholds::ScalarTolerance; +}; + +/// @brief Result of updating the path distance while preserving the rest of the path state. +struct SCameraPathDistanceUpdateResult final +{ + bool exact = false; + hlsl::float64_t appliedDistance = 0.0; +}; + +/// @brief Default constants used by the built-in `Path Rig` model. +struct SCameraPathDefaults final +{ + static constexpr double MinU = static_cast(SCameraTargetRelativeTraits::MinDistance); + static constexpr double ScalarTolerance = SCameraToolingThresholds::ScalarTolerance; + static constexpr double ExactStateTolerance = SCameraToolingThresholds::TinyScalarEpsilon; + static constexpr double ExactAngleToleranceDeg = ExactStateTolerance * 180.0 / hlsl::numbers::pi; + static constexpr double AngleToleranceDeg = SCameraToolingThresholds::DefaultAngularToleranceDeg; + static inline constexpr std::string_view Identifier = SCameraPathRigMetadata::Identifier; + static inline constexpr std::string_view Description = SCameraPathRigMetadata::DefaultModelDescription; + static inline constexpr ICamera::PathStateLimits Limits = {}; + static inline constexpr SCameraPathComparisonThresholds ComparisonThresholds = { + .sToleranceDeg = AngleToleranceDeg, + .rollToleranceDeg = AngleToleranceDeg, + .scalarTolerance = ScalarTolerance + }; + static inline constexpr SCameraPathComparisonThresholds ExactComparisonThresholds = { + .sToleranceDeg = ExactAngleToleranceDeg, + .rollToleranceDeg = ExactAngleToleranceDeg, + .scalarTolerance = ExactStateTolerance + }; +}; + +using SCameraPathLimits = 
ICamera::PathStateLimits; + +/// @brief Evaluation context passed into the active path-model control law. +struct SCameraPathControlContext final +{ + ICamera::PathState currentState = {}; + hlsl::float64_t3 translation = hlsl::float64_t3(0.0); + hlsl::float64_t3 rotation = hlsl::float64_t3(0.0); + hlsl::float64_t3 targetPosition = hlsl::float64_t3(0.0); + const CReferenceTransform* reference = nullptr; + SCameraPathLimits limits = SCameraPathDefaults::Limits; +}; + +/// @brief Callback bundle defining path-state resolution, input response, evaluation, and distance updates. +/// +/// A concrete `Path Rig` model provides: +/// - state resolution from target position, world position, and optional typed input +/// - one control law turning accumulated runtime motion into `SCameraPathDelta` +/// - one state integrator +/// - one canonical evaluator producing pose and target-relative view data +/// - one distance-update rule for typed helpers that adjust distance directly +struct SCameraPathModel final +{ + using resolve_state_t = std::function; + using control_law_t = std::function; + using integrate_t = std::function; + using evaluate_t = std::function; + using update_distance_t = std::function; + + resolve_state_t resolveState; + control_law_t controlLaw; + integrate_t integrate; + evaluate_t evaluate; + update_distance_t updateDistance; +}; + +/// @brief Shared state, comparison, and model-building helpers for `Path Rig`. +struct CCameraPathUtilities final +{ + /// @brief Build the default path state used by the built-in model. + static ICamera::PathState makeDefaultPathState(double minU = SCameraPathDefaults::MinU); + + /// @brief Build path-state comparison tolerances from caller-provided angular and scalar thresholds. 
+ static SCameraPathComparisonThresholds makePathComparisonThresholds( + double angularToleranceDeg = SCameraPathDefaults::AngleToleranceDeg, + double scalarTolerance = SCameraPathDefaults::ScalarTolerance); + + /// @brief Return the default path-state limits used when a camera does not expose custom ones. + static inline constexpr SCameraPathLimits makeDefaultPathLimits() + { + return SCameraPathDefaults::Limits; + } + + /// @brief Check whether every scalar stored in the path state is finite. + static bool isPathStateFinite(const ICamera::PathState& state); + + /// @brief Check whether the path limits can be sanitized into a valid numeric domain. + static bool isPathLimitsWellFormed(const SCameraPathLimits& limits); + + /// @brief Clamp and normalize path-state limits into a valid numeric domain. + static bool sanitizePathLimits(SCameraPathLimits& limits); + + /// @brief Sanitize a path state against a caller-provided `minU` lower bound. + static bool sanitizePathState(ICamera::PathState& state, double minU); + + /// @brief Sanitize a path state against a full limit bundle and optionally report the applied distance. + static bool sanitizePathState(ICamera::PathState& state, const SCameraPathLimits& limits, double* outAppliedDistance = nullptr); + + /// @brief Rescale the `(u, v)` pair so the path state reaches the requested radial distance. + static bool tryScalePathStateDistance( + double desiredDistance, + double minU, + ICamera::PathState& ioState, + double* outAppliedDistance = nullptr); + + /// @brief Update the distance encoded by a path state while respecting the provided limits. 
+ static bool tryUpdatePathStateDistance( + float desiredDistance, + const SCameraPathLimits& limits, + ICamera::PathState& ioState, + SCameraPathDistanceUpdateResult* outResult = nullptr); + + static bool tryBuildPathStateFromPosition( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + double minU, + ICamera::PathState& outState); + + static bool tryResolvePathState( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const SCameraPathLimits& limits, + const ICamera::PathState* requestedState, + ICamera::PathState& outState); + + static bool tryBuildPathPoseFromState( + const hlsl::float64_t3& targetPosition, + const ICamera::PathState& state, + const SCameraPathLimits& limits, + SCameraPathPose& outPose); + + static bool tryBuildPathPoseFromState( + const hlsl::float64_t3& targetPosition, + const ICamera::PathState& state, + const SCameraPathLimits& limits, + hlsl::float64_t3& outPosition, + hlsl::camera_quaternion_t& outOrientation, + hlsl::float64_t* outAppliedDistance = nullptr, + hlsl::float64_t2* outOrbitUv = nullptr); + + static bool pathStatesNearlyEqual( + const ICamera::PathState& lhs, + const ICamera::PathState& rhs, + const SCameraPathComparisonThresholds& thresholds = {}); + + static bool pathStatesChanged( + const ICamera::PathState& lhs, + const ICamera::PathState& rhs, + const SCameraPathComparisonThresholds& thresholds = {}); + + static hlsl::float64_t4 buildPathStateDeltaVector( + const ICamera::PathState& currentState, + const ICamera::PathState& desiredState); + + static SCameraPathDelta buildPathStateDelta( + const ICamera::PathState& currentState, + const ICamera::PathState& desiredState); + + static SCameraPathDelta makePathDeltaFromVirtualPathMotion( + const hlsl::float64_t3& translation, + const hlsl::float64_t3& rotation = hlsl::float64_t3(0.0)); + + static SCameraPathDelta buildDefaultPathControlDelta(const SCameraPathControlContext& context); + + static void appendPathDeltaEvents( 
+ std::vector& events, + const SCameraPathDelta& delta, + double moveDenominator, + double rotationDenominator, + const SCameraPathComparisonThresholds& thresholds = {}); + + static bool tryBuildCanonicalPathState( + const hlsl::float64_t3& targetPosition, + const ICamera::PathState& state, + const SCameraPathLimits& limits, + SCameraCanonicalPathState& outState); + + static bool tryApplyPathStateDelta( + const ICamera::PathState& currentState, + const SCameraPathDelta& delta, + const SCameraPathLimits& limits, + ICamera::PathState& outState); + + static ICamera::PathState blendPathStates( + const ICamera::PathState& from, + const ICamera::PathState& to, + double alpha); + + static bool tryBuildPathStateTransition( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& currentPosition, + const hlsl::float64_t3& desiredPosition, + const SCameraPathLimits& limits, + const ICamera::PathState* currentStateOverride, + const ICamera::PathState* desiredStateOverride, + SCameraPathStateTransition& outTransition); + + static SCameraPathModel makeDefaultPathModel(); +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PATH_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPersistence.hpp b/include/nbl/ext/Cameras/CCameraPersistence.hpp new file mode 100644 index 0000000000..e5be662303 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPersistence.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PERSISTENCE_HPP_ +#define _C_CAMERA_PERSISTENCE_HPP_ + +#include +#include +#include +#include + +#include "CCameraKeyframeTrackPersistence.hpp" +#include "CCameraPresetPersistence.hpp" +#include "nbl/system/path.h" + +namespace nbl::system +{ + +class ISystem; + +struct CCameraPersistenceUtilities final +{ + /// @brief Serialize a preset collection to JSON text. 
+ static std::string serializePresetCollection(std::span presets, int indent = 2); + /// @brief Parse a preset collection from JSON text. + static bool deserializePresetCollection(std::string_view text, std::vector& presets, std::string* error = nullptr); + + /// @brief Save a preset collection to disk as JSON. + static bool savePresetCollectionToFile(ISystem& system, const path& path, std::span presets, int indent = 2); + /// @brief Load a preset collection from disk. + static bool loadPresetCollectionFromFile(ISystem& system, const path& path, std::vector& presets, std::string* error = nullptr); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_PERSISTENCE_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPlaybackTimeline.hpp b/include/nbl/ext/Cameras/CCameraPlaybackTimeline.hpp new file mode 100644 index 0000000000..64dfc811e2 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPlaybackTimeline.hpp @@ -0,0 +1,103 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PLAYBACK_TIMELINE_HPP_ +#define _C_CAMERA_PLAYBACK_TIMELINE_HPP_ + +#include "CCameraKeyframeTrack.hpp" + +namespace nbl::core +{ + +/// @brief Shared playback cursor state for camera keyframe tracks. +/// The cursor stores playback state only: playing flag, looping mode, speed, and current time. +struct CCameraPlaybackCursor +{ + bool playing = false; + bool loop = true; + float speed = 1.f; + float time = 0.f; +}; + +/// @brief Outcome of advancing a playback cursor against a keyframe track. +/// This result reports how time changed during one advance step. 
+struct SCameraPlaybackAdvanceResult +{ + bool hasTrack = false; + bool changedTime = false; + bool wrapped = false; + bool reachedEnd = false; + bool stopped = false; + float duration = 0.f; + float time = 0.f; +}; + +struct CCameraPlaybackTimelineUtilities final +{ +public: + /// @brief Duration of the current playback track in seconds. + static inline float getPlaybackTrackDuration(const CCameraKeyframeTrack& track) + { + if (track.keyframes.empty()) + return 0.f; + + return track.keyframes.back().time; + } + + /// @brief Reset cursor time and stop playback without mutating loop or speed settings. + static inline void resetPlaybackCursor(CCameraPlaybackCursor& cursor, const float time = 0.f) + { + cursor.playing = false; + cursor.time = std::max(0.f, time); + } + + /// @brief Clamp cursor time into the valid time range of the current track. + static inline void clampPlaybackCursorToTrack(const CCameraKeyframeTrack& track, CCameraPlaybackCursor& cursor) + { + CCameraKeyframeTrackUtilities::clampTrackTimeToKeyframes(track, cursor.time); + } + + /// @brief Advance cursor time by `dtSec * speed` and report whether playback wrapped or stopped. 
+ static inline SCameraPlaybackAdvanceResult advancePlaybackCursor(CCameraPlaybackCursor& cursor, const CCameraKeyframeTrack& track, const double dtSec) + { + SCameraPlaybackAdvanceResult result; + result.hasTrack = !track.keyframes.empty(); + result.duration = getPlaybackTrackDuration(track); + result.time = cursor.time; + + if (!result.hasTrack || !cursor.playing) + return result; + + const auto previousTime = cursor.time; + cursor.time += static_cast(dtSec * cursor.speed); + result.changedTime = cursor.time != previousTime; + result.time = cursor.time; + + if (result.duration <= 0.f) + return result; + + if (cursor.loop) + { + while (cursor.time > result.duration) + { + cursor.time -= result.duration; + result.wrapped = true; + } + } + else if (cursor.time > result.duration) + { + cursor.time = result.duration; + cursor.playing = false; + result.reachedEnd = true; + result.stopped = true; + } + + result.time = cursor.time; + return result; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PLAYBACK_TIMELINE_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPresentationUtilities.hpp b/include/nbl/ext/Cameras/CCameraPresentationUtilities.hpp new file mode 100644 index 0000000000..9b26db9b79 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPresentationUtilities.hpp @@ -0,0 +1,125 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PRESENTATION_UTILITIES_HPP_ +#define _C_CAMERA_PRESENTATION_UTILITIES_HPP_ + +#include + +#include "CCameraTextUtilities.hpp" + +namespace nbl::ui +{ + +/// @brief Shared exactness-oriented filter used by preset presentation surfaces. +enum class EPresetApplyPresentationFilter : uint8_t +{ + All, + Exact, + BestEffort +}; + +/// @brief Shared badge/pill policy derived from one analyzed presentation answer. 
+struct SCameraGoalApplyPresentationBadges final +{ + bool exact = false; + bool bestEffort = false; + bool dropsState = false; + bool sharedStateOnly = false; + bool blocked = false; +}; + +/// @brief Presentation-ready wrapper around analyzed goal apply compatibility. +struct SCameraGoalApplyPresentation final : core::SCameraGoalApplyAnalysis +{ + SCameraGoalApplyPresentationBadges badges; + std::string sourceKindLabel; + std::string goalStateLabel; + std::string compatibilityLabel; + std::string policyLabel; + + inline bool matchesFilter(const EPresetApplyPresentationFilter mode) const + { + switch (mode) + { + case EPresetApplyPresentationFilter::All: + return true; + case EPresetApplyPresentationFilter::Exact: + return hasCamera && exact(); + case EPresetApplyPresentationFilter::BestEffort: + return hasCamera && !exact(); + default: + return true; + } + } +}; + +/// @brief Presentation-ready wrapper around analyzed camera capture viability. +struct SCameraCapturePresentation final : core::SCameraCaptureAnalysis +{ + std::string policyLabel; +}; + +struct CCameraPresentationUtilities final +{ + /// @brief Shared user-facing label for the exactness filter selector. + static inline const char* getPresetApplyPresentationFilterLabel(const EPresetApplyPresentationFilter mode) + { + switch (mode) + { + case EPresetApplyPresentationFilter::All: + return "All"; + case EPresetApplyPresentationFilter::Exact: + return "Exact"; + case EPresetApplyPresentationFilter::BestEffort: + return "Best-effort"; + default: + return "All"; + } + } + + /// @brief Build reusable badge flags for one preset/keyframe compatibility answer. 
+ static inline SCameraGoalApplyPresentationBadges collectGoalApplyPresentationBadges(const SCameraGoalApplyPresentation& presentation) + { + SCameraGoalApplyPresentationBadges badges; + badges.exact = presentation.exact(); + badges.bestEffort = presentation.hasCamera && !presentation.exact(); + badges.dropsState = presentation.dropsGoalState(); + badges.sharedStateOnly = presentation.usesSharedStateOnly(); + badges.blocked = !presentation.canApply; + return badges; + } + + /// @brief Build presentation text for one analyzed goal-apply result. + static inline SCameraGoalApplyPresentation makeGoalApplyPresentation(const core::SCameraGoalApplyAnalysis& analysis, const core::ICamera* targetCamera) + { + SCameraGoalApplyPresentation presentation; + static_cast(presentation) = analysis; + presentation.badges = collectGoalApplyPresentationBadges(presentation); + presentation.sourceKindLabel = std::string(CCameraTextUtilities::getCameraTypeLabel(presentation.goal.sourceKind)); + presentation.goalStateLabel = CCameraTextUtilities::describeGoalStateMask(presentation.goal.sourceGoalStateMask); + presentation.compatibilityLabel = CCameraTextUtilities::describeGoalApplyCompatibility(analysis, targetCamera); + presentation.policyLabel = CCameraTextUtilities::describeGoalApplyPolicy(analysis); + return presentation; + } + + /// @brief Analyze one preset against one camera and return reusable presentation data. + static inline SCameraGoalApplyPresentation analyzePresetPresentation(const core::CCameraGoalSolver& solver, const core::ICamera* camera, const core::CCameraPreset& preset) + { + return makeGoalApplyPresentation(core::CCameraGoalAnalysisUtilities::analyzePresetApply(solver, camera, preset), camera); + } + + /// @brief Analyze one camera capture path and return reusable presentation data. 
+ static inline SCameraCapturePresentation analyzeCapturePresentation(const core::CCameraGoalSolver& solver, core::ICamera* camera) + { + SCameraCapturePresentation presentation; + static_cast(presentation) = core::CCameraGoalAnalysisUtilities::analyzeCameraCapture(solver, camera); + presentation.policyLabel = CCameraTextUtilities::describeCameraCapturePolicy(presentation, camera); + return presentation; + } +}; + +} // namespace nbl::ui + +#endif // _C_CAMERA_PRESENTATION_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPreset.hpp b/include/nbl/ext/Cameras/CCameraPreset.hpp new file mode 100644 index 0000000000..a04be5e8ce --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPreset.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PRESET_HPP_ +#define _C_CAMERA_PRESET_HPP_ + +#include +#include + +#include "CCameraGoal.hpp" + +namespace nbl::core +{ + +/// @brief Named persisted camera state built on top of `CCameraGoal`. +struct CCameraPreset +{ + std::string name; + std::string identifier; + CCameraGoal goal = {}; +}; + +/// @brief Time-stamped preset entry used by playback and authoring tools. +struct CCameraKeyframe +{ + CCameraPreset preset; + float time = 0.f; +}; + +struct CCameraPresetUtilities final +{ + static inline void assignGoalToPreset(CCameraPreset& preset, const CCameraGoal& goal) + { + preset.goal = CCameraGoalUtilities::canonicalizeGoal(goal); + } + + static inline CCameraGoal makeGoalFromPreset(const CCameraPreset& preset) + { + return CCameraGoalUtilities::canonicalizeGoal(preset.goal); + } + + /// @brief Compare two named presets through their shared canonical goal state. 
+ static inline bool comparePresets(const CCameraPreset& lhs, const CCameraPreset& rhs, + const double posEps, const double rotEpsDeg, const double scalarEps) + { + return lhs.name == rhs.name && + lhs.identifier == rhs.identifier && + CCameraGoalUtilities::compareGoals(makeGoalFromPreset(lhs), makeGoalFromPreset(rhs), posEps, rotEpsDeg, scalarEps); + } + + /// @brief Compare two preset collections element-by-element through the shared canonical goal state. + static inline bool comparePresetCollections(std::span lhs, std::span rhs, + const double posEps, const double rotEpsDeg, const double scalarEps) + { + if (lhs.size() != rhs.size()) + return false; + + for (size_t i = 0u; i < lhs.size(); ++i) + { + if (!comparePresets(lhs[i], rhs[i], posEps, rotEpsDeg, scalarEps)) + return false; + } + + return true; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PRESET_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPresetFlow.hpp b/include/nbl/ext/Cameras/CCameraPresetFlow.hpp new file mode 100644 index 0000000000..8655b6bd21 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPresetFlow.hpp @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PRESET_FLOW_HPP_ +#define _C_CAMERA_PRESET_FLOW_HPP_ + +#include +#include +#include +#include + +#include "CCameraGoalAnalysis.hpp" + +namespace nbl::core +{ + +/// @brief Reusable aggregate summary for applying one preset to multiple cameras. 
+struct SCameraPresetApplySummary +{ + uint32_t targetCount = 0u; + uint32_t successCount = 0u; + uint32_t approximateCount = 0u; + uint32_t failureCount = 0u; + + inline bool hasTargets() const + { + return targetCount > 0u; + } + + inline bool succeeded() const + { + return hasTargets() && failureCount == 0u; + } + + inline bool approximate() const + { + return approximateCount > 0u; + } +}; + +struct CCameraPresetFlowUtilities final +{ + /// @brief Compare the current camera state against a preset using the shared goal representation. + static inline bool comparePresetToCameraState(const CCameraGoalSolver& solver, ICamera* camera, const CCameraPreset& preset, + const double posEps, const double rotEpsDeg, const double scalarEps) + { + const auto capture = solver.captureDetailed(camera); + if (!capture.canUseGoal()) + return false; + + return CCameraGoalUtilities::compareGoals( + capture.goal, + CCameraPresetUtilities::makeGoalFromPreset(preset), + posEps, + rotEpsDeg, + scalarEps); + } + + /// @brief Explain the first visible mismatch between a camera state and a preset. + static inline std::string describePresetCameraMismatch(const CCameraGoalSolver& solver, ICamera* camera, const CCameraPreset& preset) + { + const auto capture = solver.captureDetailed(camera); + if (!capture.hasCamera) + return "camera=null"; + if (!capture.captured) + return "goal_state=unavailable"; + if (!capture.finiteGoal) + return "goal_state=invalid"; + + return CCameraGoalUtilities::describeGoalMismatch(capture.goal, CCameraPresetUtilities::makeGoalFromPreset(preset)); + } + + /// @brief Build a preset from an already analyzed capture result. 
+ static inline bool tryCapturePreset(const SCameraCaptureAnalysis& captureAnalysis, ICamera* camera, std::string_view name, CCameraPreset& preset) + { + preset = {}; + preset.name = std::string(name); + if (!captureAnalysis.canCapture || !camera) + return false; + + preset.identifier = std::string(camera->getIdentifier()); + CCameraPresetUtilities::assignGoalToPreset(preset, captureAnalysis.goal); + return true; + } + + /// @brief Capture a preset directly from a camera through the shared goal solver. + static inline bool tryCapturePreset(const CCameraGoalSolver& solver, ICamera* camera, std::string_view name, CCameraPreset& preset) + { + return tryCapturePreset(CCameraGoalAnalysisUtilities::analyzeCameraCapture(solver, camera), camera, name, preset); + } + + /// @brief Value-returning convenience wrapper around `tryCapturePreset`. + static inline CCameraPreset capturePreset(const CCameraGoalSolver& solver, ICamera* camera, std::string_view name) + { + CCameraPreset preset; + tryCapturePreset(solver, camera, name, preset); + return preset; + } + + /// @brief Apply a preset through the shared goal solver and preserve detailed apply diagnostics. + static inline CCameraGoalSolver::SApplyResult applyPresetDetailed(const CCameraGoalSolver& solver, ICamera* camera, const CCameraPreset& preset) + { + if (!camera) + return {}; + + return solver.applyDetailed(camera, CCameraPresetUtilities::makeGoalFromPreset(preset)); + } + + /// @brief Bool-returning convenience wrapper around `applyPresetDetailed`. + static inline bool applyPreset(const CCameraGoalSolver& solver, ICamera* camera, const CCameraPreset& preset) + { + return applyPresetDetailed(solver, camera, preset).succeeded(); + } + + /// @brief Fold one detailed apply result into an aggregate preset-apply summary. 
+ static inline void accumulatePresetApplySummary(SCameraPresetApplySummary& summary, const CCameraGoalSolver::SApplyResult& result) + { + ++summary.targetCount; + if (result.succeeded()) + { + ++summary.successCount; + if (result.approximate()) + ++summary.approximateCount; + } + else + { + ++summary.failureCount; + } + } + + /// @brief Apply one preset to a camera range and collect a typed aggregate summary. + static inline SCameraPresetApplySummary applyPresetToCameraRange(const CCameraGoalSolver& solver, std::span cameras, const CCameraPreset& preset) + { + SCameraPresetApplySummary summary; + for (auto* camera : cameras) + { + if (!camera) + continue; + + accumulatePresetApplySummary(summary, applyPresetDetailed(solver, camera, preset)); + } + + return summary; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PRESET_FLOW_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraPresetPersistence.hpp b/include/nbl/ext/Cameras/CCameraPresetPersistence.hpp new file mode 100644 index 0000000000..4b00a029f6 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraPresetPersistence.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PRESET_PERSISTENCE_HPP_ +#define _C_CAMERA_PRESET_PERSISTENCE_HPP_ + +#include +#include + +#include "CCameraPreset.hpp" +#include "nbl/system/path.h" + +namespace nbl::system +{ + +class ISystem; + +/// @brief JSON text and file helpers for goals and presets. +struct CCameraPresetPersistenceUtilities final +{ + /// @brief Serialize one camera goal to JSON text. + static std::string serializeGoal(const core::CCameraGoal& goal, int indent = 2); + /// @brief Deserialize one camera goal from JSON text. + static bool deserializeGoal(std::string_view text, core::CCameraGoal& goal, std::string* error = nullptr); + + /// @brief Save one camera goal to a file. 
+ static bool saveGoalToFile(ISystem& system, const path& path, const core::CCameraGoal& goal, int indent = 2); + /// @brief Load one camera goal from a file. + static bool loadGoalFromFile(ISystem& system, const path& path, core::CCameraGoal& goal, std::string* error = nullptr); + + /// @brief Serialize one camera preset to JSON text. + static std::string serializePreset(const core::CCameraPreset& preset, int indent = 2); + /// @brief Deserialize one camera preset from JSON text. + static bool deserializePreset(std::string_view text, core::CCameraPreset& preset, std::string* error = nullptr); + + /// @brief Save one camera preset to a file. + static bool savePresetToFile(ISystem& system, const path& path, const core::CCameraPreset& preset, int indent = 2); + /// @brief Load one camera preset from a file. + static bool loadPresetFromFile(ISystem& system, const path& path, core::CCameraPreset& preset, std::string* error = nullptr); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_PRESET_PERSISTENCE_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraProjectionUtilities.hpp b/include/nbl/ext/Cameras/CCameraProjectionUtilities.hpp new file mode 100644 index 0000000000..220cdc2fc6 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraProjectionUtilities.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_PROJECTION_UTILITIES_HPP_ +#define _C_CAMERA_PROJECTION_UTILITIES_HPP_ + +#include "IPlanarProjection.hpp" + +namespace nbl::core +{ + +struct CCameraProjectionUtilities final +{ + /// @brief Apply a camera-provided dynamic perspective FOV to one planar projection entry. 
+ static inline bool syncDynamicPerspectiveProjection(ICamera* camera, IPlanarProjection::CProjection& projection) + { + if (!camera) + return false; + + const auto& params = projection.getParameters(); + if (params.m_type != IPlanarProjection::CProjection::Perspective) + return false; + + float dynamicFov = 0.0f; + if (!camera->tryGetDynamicPerspectiveFov(dynamicFov)) + return false; + + projection.setPerspective(params.m_zNear, params.m_zFar, dynamicFov); + return true; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_PROJECTION_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraScriptedCheckRunner.hpp b/include/nbl/ext/Cameras/CCameraScriptedCheckRunner.hpp new file mode 100644 index 0000000000..abce94e4e1 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraScriptedCheckRunner.hpp @@ -0,0 +1,107 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_SCRIPTED_CHECK_RUNNER_HPP_ +#define _C_CAMERA_SCRIPTED_CHECK_RUNNER_HPP_ + +#include +#include +#include +#include +#include +#include +#include + +#include "CCameraFollowRegressionUtilities.hpp" +#include "CCameraScriptedRuntime.hpp" +#include "SCameraRigPose.hpp" + +namespace nbl::system +{ + +/// @brief Runtime state for authored scripted checks. +/// +/// This state stores: +/// - the index of the next authored check to evaluate +/// - one baseline pose reference +/// - one step pose reference +struct CCameraScriptedCheckRuntimeState +{ + struct SPoseReference final : core::SCameraRigPose + { + bool valid = false; + }; + + size_t nextCheckIndex = 0u; + SPoseReference baseline = {}; + SPoseReference step = {}; +}; + +/// @brief Shared per-frame evaluation context for authored scripted checks. 
+struct CCameraScriptedCheckContext +{ + uint64_t frame = 0ull; + core::ICamera* camera = nullptr; + const core::CVirtualGimbalEvent* imguizmoVirtual = nullptr; + uint32_t imguizmoVirtualCount = 0u; + const core::CTrackedTarget* trackedTarget = nullptr; + const core::SCameraFollowConfig* followConfig = nullptr; + const SCameraProjectionContext* followProjectionContext = nullptr; + const core::CCameraGoalSolver* goalSolver = nullptr; +}; + +/// @brief Reusable log entry produced by scripted check evaluation. +struct CCameraScriptedCheckLogEntry +{ + bool failure = false; + std::string text; +}; + +/// @brief Result for one frame worth of scripted checks. +struct CCameraScriptedCheckFrameResult +{ + std::vector logs; + bool hadFailures = false; +}; + +struct CCameraScriptedCheckRunnerUtilities final +{ + static void scriptedCheckSetStepReference( + CCameraScriptedCheckRuntimeState& state, + const hlsl::float64_t3& position, + const hlsl::camera_quaternion_t& orientation); + static void scriptedCheckSetBaselineReference( + CCameraScriptedCheckRuntimeState& state, + const hlsl::float64_t3& position, + const hlsl::camera_quaternion_t& orientation); + static bool scriptedCheckComputePoseDelta( + const hlsl::float64_t3& currentPosition, + const hlsl::camera_quaternion_t& currentOrientation, + const hlsl::float64_t3& referencePosition, + const hlsl::camera_quaternion_t& referenceOrientation, + hlsl::SCameraPoseDelta& outDelta); + + template + static inline std::string buildScriptedCheckMessage(Fn&& formatter) + { + std::ostringstream oss; + formatter(oss); + return oss.str(); + } + + static void appendScriptedCheckLog( + CCameraScriptedCheckFrameResult& result, + bool failure, + std::string&& text); + + /// @brief Evaluate all authored scripted checks scheduled for the current frame. 
+ static CCameraScriptedCheckFrameResult evaluateScriptedChecksForFrame( + const std::vector& checks, + CCameraScriptedCheckRuntimeState& state, + const CCameraScriptedCheckContext& context); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_SCRIPTED_CHECK_RUNNER_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraScriptedRuntime.hpp b/include/nbl/ext/Cameras/CCameraScriptedRuntime.hpp new file mode 100644 index 0000000000..3de6d28f1d --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraScriptedRuntime.hpp @@ -0,0 +1,351 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_SCRIPTED_RUNTIME_HPP_ +#define _C_CAMERA_SCRIPTED_RUNTIME_HPP_ + +#include +#include +#include +#include + +#include "CCameraGoal.hpp" +#include "CCameraFollowRegressionUtilities.hpp" +#include "CVirtualGimbalEvent.hpp" +#include "nbl/ui/KeyCodes.h" + +namespace nbl::system +{ + +/// @brief Shared scripted runtime payload used by camera-sequence consumers. +/// +/// This type stores the expanded per-frame events and checks produced from a +/// compact authored camera sequence. 
+struct CCameraScriptedInputEvent +{ + enum class Type : uint8_t + { + Keyboard, + Mouse, + Imguizmo, + Goal, + TrackedTargetTransform, + SegmentLabel + }; + + struct KeyboardData + { + enum class Action : uint8_t + { + Uninitialized = 0, + Pressed = 1, + Released = 2 + }; + + ui::E_KEY_CODE key = ui::EKC_NONE; + Action action = Action::Uninitialized; + }; + + struct MouseData + { + enum class Type : uint8_t + { + Uninitialized = 0, + Click = 1, + Scroll = 2, + Movement = 4 + }; + + enum class ClickAction : uint8_t + { + Uninitialized = 0, + Pressed = 1, + Released = 2 + }; + + Type type = Type::Uninitialized; + ui::E_MOUSE_BUTTON button = ui::EMB_LEFT_BUTTON; + ClickAction action = ClickAction::Uninitialized; + hlsl::int16_t2 position = hlsl::int16_t2(0); + hlsl::int16_t2 delta = hlsl::int16_t2(0); + hlsl::int16_t2 scroll = hlsl::int16_t2(0); + }; + + struct GoalData + { + core::CCameraGoal goal = {}; + bool requireExact = true; + }; + + struct TrackedTargetTransformData + { + hlsl::float64_t4x4 transform = hlsl::float64_t4x4(1.0); + }; + + struct SegmentLabelData + { + std::string label; + }; + + uint64_t frame = 0; + Type type = Type::Keyboard; + KeyboardData keyboard; + MouseData mouse; + hlsl::float32_t4x4 imguizmo = hlsl::float32_t4x4(1.f); + GoalData goal; + TrackedTargetTransformData trackedTargetTransform; + SegmentLabelData segmentLabel; +}; + +struct CCameraScriptedCheckDefaults final +{ + static constexpr float VirtualEventTolerance = 1e-3f; + static constexpr float PositionTolerance = static_cast(core::SCameraToolingThresholds::DefaultPositionTolerance); + static constexpr float EulerToleranceDeg = static_cast(core::SCameraToolingThresholds::DefaultAngularToleranceDeg); + static constexpr float FollowScreenToleranceNdc = SCameraFollowRegressionThresholds::DefaultProjectedNdcTolerance; +}; + +struct CCameraScriptedInputCheck +{ + enum class Kind : uint8_t + { + Baseline, + ImguizmoVirtual, + GimbalNear, + GimbalDelta, + GimbalStep, + FollowTargetLock + 
}; + + struct ExpectedVirtualEvent + { + core::CVirtualGimbalEvent::VirtualEventType type = core::CVirtualGimbalEvent::None; + hlsl::float64_t magnitude = 0.0; + }; + + uint64_t frame = 0; + Kind kind = Kind::Baseline; + float tolerance = CCameraScriptedCheckDefaults::VirtualEventTolerance; + std::vector expectedVirtualEvents; + + hlsl::float32_t3 expectedPos = hlsl::float32_t3(0.f); + hlsl::float32_t3 expectedEulerDeg = hlsl::float32_t3(0.f); + bool hasExpectedPos = false; + bool hasExpectedEuler = false; + float posTolerance = CCameraScriptedCheckDefaults::PositionTolerance; + float eulerToleranceDeg = CCameraScriptedCheckDefaults::EulerToleranceDeg; + float minPosDelta = 0.0f; + float minEulerDeltaDeg = 0.0f; + bool hasPosDeltaConstraint = false; + bool hasEulerDeltaConstraint = false; +}; + +/// @brief Fully expanded scripted timeline shared between authored parsers and runtime consumers. +struct CCameraScriptedTimeline +{ + std::vector events; + std::vector checks; + std::vector captureFrames; + + inline void clear() + { + events.clear(); + checks.clear(); + captureFrames.clear(); + } + + inline bool empty() const + { + return events.empty() && checks.empty() && captureFrames.empty(); + } +}; + +struct CCameraScriptedRuntimeUtilities final +{ + static inline void finalizeScriptedTimeline( + std::vector& events, + std::vector& checks, + std::vector& captureFrames, + const bool disableCaptureFrames = false) + { + std::stable_sort(events.begin(), events.end(), + [](const CCameraScriptedInputEvent& a, const CCameraScriptedInputEvent& b) { return a.frame < b.frame; }); + std::stable_sort(checks.begin(), checks.end(), + [](const CCameraScriptedInputCheck& a, const CCameraScriptedInputCheck& b) { return a.frame < b.frame; }); + if (!captureFrames.empty()) + { + std::sort(captureFrames.begin(), captureFrames.end()); + captureFrames.erase(std::unique(captureFrames.begin(), captureFrames.end()), captureFrames.end()); + } + if (disableCaptureFrames) + 
captureFrames.clear(); + } + + static inline void finalizeScriptedTimeline(CCameraScriptedTimeline& timeline, const bool disableCaptureFrames = false) + { + finalizeScriptedTimeline(timeline.events, timeline.checks, timeline.captureFrames, disableCaptureFrames); + } + + static inline void appendScriptedGoalEvent( + CCameraScriptedTimeline& timeline, + const uint64_t frame, + const core::CCameraGoal& goal, + const bool requireExact = true) + { + CCameraScriptedInputEvent entry; + entry.frame = frame; + entry.type = CCameraScriptedInputEvent::Type::Goal; + entry.goal.goal = goal; + entry.goal.requireExact = requireExact; + timeline.events.emplace_back(std::move(entry)); + } + + static inline void appendScriptedTrackedTargetTransformEvent( + CCameraScriptedTimeline& timeline, + const uint64_t frame, + const hlsl::float64_t4x4& transform) + { + CCameraScriptedInputEvent entry; + entry.frame = frame; + entry.type = CCameraScriptedInputEvent::Type::TrackedTargetTransform; + entry.trackedTargetTransform.transform = transform; + timeline.events.emplace_back(std::move(entry)); + } + + static inline void appendScriptedSegmentLabelEvent( + CCameraScriptedTimeline& timeline, + const uint64_t frame, + std::string label) + { + CCameraScriptedInputEvent entry; + entry.frame = frame; + entry.type = CCameraScriptedInputEvent::Type::SegmentLabel; + entry.segmentLabel.label = std::move(label); + timeline.events.emplace_back(std::move(entry)); + } + + static inline void appendScriptedBaselineCheck(CCameraScriptedTimeline& timeline, const uint64_t frame) + { + CCameraScriptedInputCheck entry; + entry.frame = frame; + entry.kind = CCameraScriptedInputCheck::Kind::Baseline; + timeline.checks.emplace_back(std::move(entry)); + } + + static inline void appendScriptedGimbalStepCheck( + CCameraScriptedTimeline& timeline, + const uint64_t frame, + const bool hasPosDeltaConstraint, + const float posTolerance, + const float minPosDelta, + const bool hasEulerDeltaConstraint, + const float 
eulerToleranceDeg, + const float minEulerDeltaDeg) + { + CCameraScriptedInputCheck entry; + entry.frame = frame; + entry.kind = CCameraScriptedInputCheck::Kind::GimbalStep; + if (hasPosDeltaConstraint) + { + entry.hasPosDeltaConstraint = true; + entry.posTolerance = posTolerance; + entry.minPosDelta = minPosDelta; + } + if (hasEulerDeltaConstraint) + { + entry.hasEulerDeltaConstraint = true; + entry.eulerToleranceDeg = eulerToleranceDeg; + entry.minEulerDeltaDeg = minEulerDeltaDeg; + } + timeline.checks.emplace_back(std::move(entry)); + } + + static inline void appendScriptedFollowTargetLockCheck( + CCameraScriptedTimeline& timeline, + const uint64_t frame, + const float toleranceDeg = CCameraScriptedCheckDefaults::EulerToleranceDeg, + const float screenToleranceNdc = CCameraScriptedCheckDefaults::FollowScreenToleranceNdc) + { + CCameraScriptedInputCheck entry; + entry.frame = frame; + entry.kind = CCameraScriptedInputCheck::Kind::FollowTargetLock; + entry.eulerToleranceDeg = toleranceDeg; + entry.posTolerance = screenToleranceNdc; + timeline.checks.emplace_back(std::move(entry)); + } +}; + +/// @brief Per-frame scripted runtime batch already partitioned by payload kind. +/// +/// Consumers can dequeue authored events for one frame and then adapt only the buckets they care +/// about, without repeatedly switching on `CCameraScriptedInputEvent::Type` in local glue. 
+struct CCameraScriptedFrameEvents +{ + std::vector keyboard; + std::vector mouse; + std::vector imguizmo; + std::vector goals; + std::vector trackedTargetTransforms; + std::vector segmentLabels; + + inline void clear() + { + keyboard.clear(); + mouse.clear(); + imguizmo.clear(); + goals.clear(); + trackedTargetTransforms.clear(); + segmentLabels.clear(); + } + + inline bool empty() const + { + return keyboard.empty() && mouse.empty() && imguizmo.empty() && + goals.empty() && trackedTargetTransforms.empty() && segmentLabels.empty(); + } +}; + +/// @brief Dequeue all authored scripted events scheduled for one frame. +struct CCameraScriptedFrameEventUtilities final +{ + static inline void dequeueScriptedFrameEvents( + const std::vector& events, + size_t& nextEventIndex, + const uint64_t frame, + CCameraScriptedFrameEvents& out) + { + out.clear(); + while (nextEventIndex < events.size() && events[nextEventIndex].frame == frame) + { + const auto& ev = events[nextEventIndex]; + switch (ev.type) + { + case CCameraScriptedInputEvent::Type::Keyboard: + out.keyboard.emplace_back(ev.keyboard); + break; + case CCameraScriptedInputEvent::Type::Mouse: + out.mouse.emplace_back(ev.mouse); + break; + case CCameraScriptedInputEvent::Type::Imguizmo: + out.imguizmo.emplace_back(ev.imguizmo); + break; + case CCameraScriptedInputEvent::Type::Goal: + out.goals.emplace_back(ev.goal); + break; + case CCameraScriptedInputEvent::Type::TrackedTargetTransform: + out.trackedTargetTransforms.emplace_back(ev.trackedTargetTransform); + break; + case CCameraScriptedInputEvent::Type::SegmentLabel: + out.segmentLabels.emplace_back(ev.segmentLabel.label); + break; + } + + ++nextEventIndex; + } + } +}; + +} // namespace nbl::system + +#endif diff --git a/include/nbl/ext/Cameras/CCameraScriptedUiInputUtilities.hpp b/include/nbl/ext/Cameras/CCameraScriptedUiInputUtilities.hpp new file mode 100644 index 0000000000..6015958c3c --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraScriptedUiInputUtilities.hpp 
@@ -0,0 +1,98 @@ +#ifndef _C_CAMERA_SCRIPTED_UI_INPUT_UTILITIES_HPP_ +#define _C_CAMERA_SCRIPTED_UI_INPUT_UTILITIES_HPP_ + +#include +#include + +#include "CCameraScriptedRuntime.hpp" +#include "nbl/ui/SInputEvent.h" + +namespace nbl::ui +{ + +/// @brief Convert authored scripted keyboard and mouse payloads into runtime UI input events. +/// +/// The scripted runtime stores compact authoring-friendly payloads. This helper +/// expands them into the concrete `SKeyboardEvent` and `SMouseEvent` objects +/// consumed by the same input path as live window events. +struct CCameraScriptedUiInputUtilities final +{ + /// @brief Build one runtime keyboard event from authored scripted keyboard data. + static inline SKeyboardEvent makeScriptedKeyboardEvent( + const std::chrono::microseconds timestamp, + IWindow* const window, + const system::CCameraScriptedInputEvent::KeyboardData& authoredKeyboard) + { + SKeyboardEvent event(timestamp); + event.keyCode = authoredKeyboard.key; + event.action = + authoredKeyboard.action == system::CCameraScriptedInputEvent::KeyboardData::Action::Pressed ? + SKeyboardEvent::ECA_PRESSED : + SKeyboardEvent::ECA_RELEASED; + event.window = window; + return event; + } + + /// @brief Build one runtime mouse event from authored scripted mouse data. + static inline bool tryBuildScriptedMouseEvent( + const std::chrono::microseconds timestamp, + IWindow* const window, + const system::CCameraScriptedInputEvent::MouseData& authoredMouse, + SMouseEvent& outEvent) + { + outEvent = SMouseEvent(timestamp); + outEvent.window = window; + + switch (authoredMouse.type) + { + case system::CCameraScriptedInputEvent::MouseData::Type::Click: + outEvent.type = SMouseEvent::EET_CLICK; + outEvent.clickEvent.mouseButton = authoredMouse.button; + outEvent.clickEvent.action = + authoredMouse.action == system::CCameraScriptedInputEvent::MouseData::ClickAction::Pressed ? 
+ SMouseEvent::SClickEvent::EA_PRESSED : + SMouseEvent::SClickEvent::EA_RELEASED; + outEvent.clickEvent.clickPosX = authoredMouse.position.x; + outEvent.clickEvent.clickPosY = authoredMouse.position.y; + return true; + case system::CCameraScriptedInputEvent::MouseData::Type::Scroll: + outEvent.type = SMouseEvent::EET_SCROLL; + outEvent.scrollEvent.verticalScroll = authoredMouse.scroll.x; + outEvent.scrollEvent.horizontalScroll = authoredMouse.scroll.y; + return true; + case system::CCameraScriptedInputEvent::MouseData::Type::Movement: + outEvent.type = SMouseEvent::EET_MOVEMENT; + outEvent.movementEvent.relativeMovementX = authoredMouse.delta.x; + outEvent.movementEvent.relativeMovementY = authoredMouse.delta.y; + return true; + default: + return false; + } + } + + /// @brief Append one authored scripted input batch to existing runtime event buffers. + static inline void appendScriptedUiInputEvents( + const std::chrono::microseconds timestamp, + IWindow* const window, + const std::vector& authoredKeyboard, + const std::vector& authoredMouse, + std::vector& outKeyboard, + std::vector& outMouse) + { + outKeyboard.reserve(outKeyboard.size() + authoredKeyboard.size()); + for (const auto& keyboardEvent : authoredKeyboard) + outKeyboard.emplace_back(makeScriptedKeyboardEvent(timestamp, window, keyboardEvent)); + + outMouse.reserve(outMouse.size() + authoredMouse.size()); + for (const auto& mouseEvent : authoredMouse) + { + SMouseEvent builtEvent(timestamp); + if (tryBuildScriptedMouseEvent(timestamp, window, mouseEvent, builtEvent)) + outMouse.emplace_back(builtEvent); + } + } +}; + +} // namespace nbl::ui + +#endif // _C_CAMERA_SCRIPTED_UI_INPUT_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraSequenceScript.hpp b/include/nbl/ext/Cameras/CCameraSequenceScript.hpp new file mode 100644 index 0000000000..feb026ad3f --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraSequenceScript.hpp @@ -0,0 +1,366 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. 
z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_SEQUENCE_SCRIPT_HPP_ +#define _C_CAMERA_SEQUENCE_SCRIPT_HPP_ + +#include +#include +#include +#include +#include +#include + +#include "CCameraMathUtilities.hpp" +#include "CCameraKeyframeTrack.hpp" +#include "CCameraPathUtilities.hpp" +#include "CCameraTargetRelativeUtilities.hpp" +#include "IPlanarProjection.hpp" + +namespace nbl::core +{ + +/// @brief Compact authored camera-sequence format shared by playback, scripting, and validation helpers. +/// +/// The authored file describes: +/// +/// - which camera kind a segment targets +/// - which reusable projection presentations are shown +/// - which keyframed camera goals are sampled over time +/// - which tracked-target poses are sampled over time +/// - which continuity thresholds and capture points are generated +/// +/// The format does not store: +/// +/// - per-frame low-level event dumps +/// - runtime-specific window actions as authored source data +/// - ImGuizmo transforms as the primary authored primitive +/// +/// Consumers may expand the compact sequence into runtime events and per-frame +/// checks. The authored data remains camera-domain data and is not a device- or +/// UI-specific event dump. + +/// @brief Authored projection view request for camera-sequence playback. +struct CCameraSequencePresentation +{ + IPlanarProjection::CProjection::ProjectionType projection = IPlanarProjection::CProjection::Perspective; + bool leftHanded = true; +}; + +/// @brief Shared continuity thresholds authored once and reused per sequence segment. +/// Max bounds are enforced per-step, while minimum progress can be satisfied by either position or rotation change. 
+struct CCameraSequenceContinuitySettings +{ + bool baseline = true; + bool step = true; + bool hasPosDeltaConstraint = true; + float minPosDelta = 0.00025f; + float maxPosDelta = 2.f; + bool hasEulerDeltaConstraint = false; + float minEulerDeltaDeg = 0.f; + float maxEulerDeltaDeg = 1.f; +}; + +/// @brief Relative goal adjustment authored against an initial preset captured from the target camera. +/// Deltas stay camera-domain and avoid binding the authored file to any specific input device or consumer. +struct CCameraSequenceGoalDelta +{ + struct SOrbitDelta final + { + hlsl::float64_t2 uvDeltaRad = hlsl::float64_t2(0.0); + float distanceDelta = 0.f; + bool hasU = false; + bool hasV = false; + bool hasDistance = false; + + inline bool hasAny() const + { + return hasU || hasV || hasDistance; + } + + inline void setUDeltaDeg(const double valueDeg) + { + uvDeltaRad.x = static_cast(hlsl::radians(valueDeg)); + hasU = true; + } + + inline void setVDeltaDeg(const double valueDeg) + { + uvDeltaRad.y = static_cast(hlsl::radians(valueDeg)); + hasV = true; + } + + inline void setDistanceDelta(const float valueScalar) + { + distanceDelta = valueScalar; + hasDistance = true; + } + }; + + struct SPathDelta final + { + SCameraPathDelta value = {}; + bool hasS = false; + bool hasU = false; + bool hasV = false; + bool hasRoll = false; + + inline bool hasAny() const + { + return hasS || hasU || hasV || hasRoll; + } + + inline void setSDeltaDeg(const double valueDeg) + { + value.s = static_cast(hlsl::radians(valueDeg)); + hasS = true; + } + + inline void setUDelta(const double valueScalar) + { + value.u = static_cast(valueScalar); + hasU = true; + } + + inline void setVDelta(const double valueScalar) + { + value.v = static_cast(valueScalar); + hasV = true; + } + + inline void setRollDeltaDeg(const double valueDeg) + { + value.roll = static_cast(hlsl::radians(valueDeg)); + hasRoll = true; + } + + inline SCameraPathDelta buildAppliedDelta() const + { + SCameraPathDelta delta = {}; + 
if (hasS) + delta.s = value.s; + if (hasU) + delta.u = value.u; + if (hasV) + delta.v = value.v; + if (hasRoll) + delta.roll = value.roll; + return delta; + } + }; + + bool hasPositionOffset = false; + hlsl::float64_t3 positionOffset = hlsl::float64_t3(0.0); + + bool hasRotationEulerDegOffset = false; + hlsl::float32_t3 rotationEulerDegOffset = hlsl::float32_t3(0.f); + + bool hasTargetOffset = false; + hlsl::float64_t3 targetOffset = hlsl::float64_t3(0.0); + + SOrbitDelta orbitDelta = {}; + + SPathDelta pathDelta = {}; + + bool hasDynamicBaseFovDelta = false; + float dynamicBaseFovDelta = 0.f; + + bool hasDynamicReferenceDistanceDelta = false; + float dynamicReferenceDistanceDelta = 0.f; +}; + +/// @brief One authored keyframe inside a reusable camera-sequence segment. +/// A keyframe can be described either as an absolute preset or as a delta relative to the captured reference preset. +struct CCameraSequenceKeyframe +{ + float time = 0.f; + bool hasAbsolutePreset = false; + CCameraPreset absolutePreset = {}; + bool hasDelta = false; + CCameraSequenceGoalDelta delta = {}; +}; + +/// @brief Concrete tracked-target pose sampled from a shared authored sequence. +struct CCameraSequenceTrackedTargetPose final : SCameraRigPose +{ +}; + +/// @brief Relative tracked-target adjustment authored against an initial tracked-target pose. +struct CCameraSequenceTrackedTargetDelta +{ + bool hasPositionOffset = false; + hlsl::float64_t3 positionOffset = hlsl::float64_t3(0.0); + + bool hasRotationEulerDegOffset = false; + hlsl::float32_t3 rotationEulerDegOffset = hlsl::float32_t3(0.f); +}; + +/// @brief One authored tracked-target keyframe inside a reusable camera-sequence segment. +/// Target keyframes stay camera-domain and can drive follow behavior without runtime-object references. 
+struct CCameraSequenceTrackedTargetKeyframe +{ + float time = 0.f; + bool hasAbsolutePosition = false; + hlsl::float64_t3 absolutePosition = hlsl::float64_t3(0.0); + bool hasAbsoluteRotationEulerDeg = false; + hlsl::float32_t3 absoluteRotationEulerDeg = hlsl::float32_t3(0.f); + bool hasDelta = false; + CCameraSequenceTrackedTargetDelta delta = {}; +}; + +/// @brief Runtime sampled tracked-target track built from an authored segment plus a reference pose. +/// Keyframes are normalized by time before sampling. Duplicate times collapse to the last authored pose. +struct CCameraSequenceTrackedTargetTrack +{ + struct SKeyframe + { + float time = 0.f; + CCameraSequenceTrackedTargetPose pose = {}; + }; + + std::vector keyframes; +}; + +/// @brief Defaults shared by all camera-sequence segments unless overridden locally. +struct CCameraSequenceSegmentDefaults +{ + float durationSeconds = 4.f; + std::vector presentations; + CCameraSequenceContinuitySettings continuity = {}; + std::vector captureFractions = { 1.f }; + bool resetCamera = true; +}; + +/// @brief Authored reusable camera-sequence segment. +/// A segment is the main unit of authored playback and validation and usually maps to one camera showcase chunk. +struct CCameraSequenceSegment +{ + std::string name; + ICamera::CameraKind cameraKind = ICamera::CameraKind::Unknown; + std::string cameraIdentifier; + + bool hasDurationSeconds = false; + float durationSeconds = 0.f; + + bool hasResetCamera = false; + bool resetCamera = true; + + std::vector presentations; + + bool hasContinuity = false; + CCameraSequenceContinuitySettings continuity = {}; + + bool hasCaptureFractions = false; + std::vector captureFractions; + + std::vector keyframes; + std::vector targetKeyframes; +}; + +/// @brief Top-level reusable camera-sequence script. +/// +/// This type stores the compact authored description that is later expanded +/// into runtime playback and check payloads. 
+struct CCameraSequenceScript +{ + bool enabled = true; + bool log = false; + bool exclusive = false; + bool hardFail = false; + bool visualDebug = false; + float visualDebugTargetFps = 0.f; + float visualDebugHoldSeconds = 0.f; + bool hasEnableActiveCameraMovement = false; + bool enableActiveCameraMovement = true; + std::string capturePrefix = "script"; + float fps = 60.f; + CCameraSequenceSegmentDefaults defaults = {}; + std::vector segments; +}; + +/// @brief Reusable compiled sequence segment derived from authored data plus captured references. +/// Consumers can build their own runtime actions/checks from this normalized representation. +struct CCameraSequenceCompiledSegment +{ + std::string name; + std::vector presentations; + CCameraSequenceContinuitySettings continuity = {}; + bool resetCamera = true; + float durationSeconds = 0.f; + uint64_t durationFrames = 0ull; + std::vector sampleTimes; + std::vector captureFrameOffsets; + CCameraKeyframeTrack track = {}; + CCameraSequenceTrackedTargetTrack trackedTargetTrack = {}; + + inline bool usesTrackedTargetTrack() const + { + return !trackedTargetTrack.keyframes.empty(); + } +}; + +/// @brief One compiled frame policy entry derived from a reusable compiled segment. +/// Consumers can map these booleans to their own runtime checks and capture requests. 
+struct CCameraSequenceCompiledFramePolicy +{ + uint64_t frameOffset = 0ull; + float sampleTime = 0.f; + bool capture = false; + bool baseline = false; + bool continuityStep = false; + bool followTargetLock = false; +}; + +struct CCameraSequenceScriptUtilities final +{ + static bool tryParseCameraKind(std::string_view value, ICamera::CameraKind& outKind); + static bool tryParseProjectionType(std::string_view value, IPlanarProjection::CProjection::ProjectionType& outType); + static void normalizeCaptureFractions(std::vector& fractions); + static bool buildSequenceKeyframePreset(const CCameraPreset& reference, const CCameraSequenceKeyframe& authored, CCameraPreset& outPreset, std::string* error = nullptr); + static bool buildSequenceTrackFromReference(const CCameraPreset& reference, const CCameraSequenceSegment& segment, CCameraKeyframeTrack& outTrack, std::string* error = nullptr); + static bool isSequenceTrackedTargetPoseFinite(const CCameraSequenceTrackedTargetPose& pose); + static bool buildSequenceTrackedTargetPoseFromReference( + const CCameraSequenceTrackedTargetPose& reference, + const CCameraSequenceTrackedTargetKeyframe& authored, + CCameraSequenceTrackedTargetPose& outPose, + std::string* error = nullptr); + static bool buildSequenceTrackedTargetTrackFromReference( + const CCameraSequenceTrackedTargetPose& reference, + const CCameraSequenceSegment& segment, + CCameraSequenceTrackedTargetTrack& outTrack, + std::string* error = nullptr); + static bool tryBuildSequenceTrackedTargetPoseAtTime( + const CCameraSequenceTrackedTargetTrack& track, + float time, + CCameraSequenceTrackedTargetPose& outPose); + static bool sequenceSegmentUsesTrackedTargetTrack(const CCameraSequenceSegment& segment); + static float getSequenceSegmentDurationSeconds(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment, const CCameraKeyframeTrack* track = nullptr); + static const std::vector& getSequenceSegmentPresentations(const CCameraSequenceScript& script, const 
CCameraSequenceSegment& segment); + static CCameraSequenceContinuitySettings getSequenceSegmentContinuity(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment); + static std::vector getSequenceSegmentCaptureFractions(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment); + static bool getSequenceSegmentResetCamera(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment); + static bool sequenceScriptUsesMultiplePresentations(const CCameraSequenceScript& script); + static uint64_t buildSequenceDurationFrames(float durationSeconds, float fps); + static void buildSequenceSampleTimes(float durationSeconds, uint64_t durationFrames, std::vector& outTimes); + static void buildSequenceCaptureFrameOffsets( + uint64_t durationFrames, + const std::vector& captureFractions, + std::vector& outOffsets); + static bool compileSequenceSegmentFromReference( + const CCameraSequenceScript& script, + const CCameraSequenceSegment& segment, + const CCameraPreset& referencePreset, + const CCameraSequenceTrackedTargetPose& referenceTrackedTargetPose, + CCameraSequenceCompiledSegment& outSegment, + std::string* error = nullptr); + static bool buildCompiledSegmentFramePolicies( + const CCameraSequenceCompiledSegment& segment, + std::vector& outPolicies, + bool includeFollowTargetLock = false); +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_SEQUENCE_SCRIPT_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraSequenceScriptPersistence.hpp b/include/nbl/ext/Cameras/CCameraSequenceScriptPersistence.hpp new file mode 100644 index 0000000000..fdc73e9937 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraSequenceScriptPersistence.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_SEQUENCE_SCRIPT_PERSISTENCE_HPP_ +#define _C_CAMERA_SEQUENCE_SCRIPT_PERSISTENCE_HPP_ + +#include +#include + +#include "CCameraSequenceScript.hpp" +#include "nbl/system/path.h" + +namespace nbl::system +{ + +class ISystem; + +struct CCameraSequenceScriptPersistenceUtilities final +{ + /// @brief Parse one compact camera-sequence script directly from JSON text. + static bool deserializeCameraSequenceScript(std::string_view text, core::CCameraSequenceScript& out, std::string* error = nullptr); + /// @brief Load one compact camera-sequence script from a file. + static bool loadCameraSequenceScriptFromFile(ISystem& system, const path& path, core::CCameraSequenceScript& out, std::string* error = nullptr); +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_SEQUENCE_SCRIPT_PERSISTENCE_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraSmokeRegressionUtilities.hpp b/include/nbl/ext/Cameras/CCameraSmokeRegressionUtilities.hpp new file mode 100644 index 0000000000..9ca40340b1 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraSmokeRegressionUtilities.hpp @@ -0,0 +1,122 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_SMOKE_REGRESSION_UTILITIES_HPP_ +#define _C_CAMERA_SMOKE_REGRESSION_UTILITIES_HPP_ + +#include + +#include "CCameraKeyframeTrack.hpp" +#include "CCameraMathUtilities.hpp" +#include "CCameraPresetFlow.hpp" +#include "ICamera.hpp" + +namespace nbl::system +{ + +using SCameraManipulationDelta = hlsl::SCameraPoseDelta; + +struct SCameraSmokeComparisonThresholds final +{ + static constexpr double TinyScalarEpsilon = core::SCameraToolingThresholds::TinyScalarEpsilon; + static constexpr double DefaultPositionTolerance = core::SCameraToolingThresholds::DefaultPositionTolerance; + static constexpr double DefaultAngularToleranceDeg = core::SCameraToolingThresholds::DefaultAngularToleranceDeg; + static constexpr double DefaultScalarTolerance = core::SCameraToolingThresholds::ScalarTolerance; + static constexpr double StrictPositionTolerance = core::SCameraToolingThresholds::ScalarTolerance; + static constexpr double StrictAngularToleranceDeg = core::SCameraToolingThresholds::DefaultAngularToleranceDeg; + static constexpr double StrictScalarTolerance = core::SCameraToolingThresholds::ScalarTolerance; + static constexpr double TrackTimeTolerance = core::SCameraToolingThresholds::ScalarTolerance; +}; + +struct CCameraSmokeRegressionUtilities final +{ +public: + /// @brief Measure one camera pose delta against an authored reference pose. 
+ static inline bool tryComputeCameraManipulationDelta( + core::ICamera* camera, + const hlsl::float64_t3& beforePosition, + const hlsl::camera_quaternion_t& beforeOrientation, + SCameraManipulationDelta& outDelta) + { + outDelta = {}; + if (!camera) + return false; + + const auto& gimbal = camera->getGimbal(); + const auto afterPosition = gimbal.getPosition(); + const auto afterOrientation = hlsl::CCameraMathUtilities::normalizeQuaternion(gimbal.getOrientation()); + return hlsl::CCameraMathUtilities::tryComputePoseDelta(afterPosition, afterOrientation, beforePosition, beforeOrientation, outDelta); + } + + /// @brief Manipulate a camera and report how far its pose moved in position and Euler-angle terms. + static inline bool tryManipulateCameraAndMeasureDelta( + core::ICamera* camera, + std::span events, + SCameraManipulationDelta& outDelta, + const double tinyEpsilon = SCameraSmokeComparisonThresholds::TinyScalarEpsilon) + { + outDelta = {}; + if (!camera || events.empty()) + return false; + + const auto& beforeGimbal = camera->getGimbal(); + const auto beforePosition = beforeGimbal.getPosition(); + const auto beforeOrientation = hlsl::CCameraMathUtilities::normalizeQuaternion(beforeGimbal.getOrientation()); + if (!hlsl::CCameraMathUtilities::isFiniteVec3(beforePosition) || !hlsl::CCameraMathUtilities::isFiniteQuaternion(beforeOrientation)) + return false; + + if (!camera->manipulate(events)) + return false; + + if (!tryComputeCameraManipulationDelta(camera, beforePosition, beforeOrientation, outDelta)) + return false; + + return outDelta.position > tinyEpsilon || outDelta.rotationDeg > tinyEpsilon; + } + + static inline bool comparePresetToCameraStateWithDefaultThresholds( + const core::CCameraGoalSolver& solver, + core::ICamera* camera, + const core::CCameraPreset& preset) + { + return core::CCameraPresetFlowUtilities::comparePresetToCameraState( + solver, + camera, + preset, + SCameraSmokeComparisonThresholds::DefaultPositionTolerance, + 
SCameraSmokeComparisonThresholds::DefaultAngularToleranceDeg, + SCameraSmokeComparisonThresholds::DefaultScalarTolerance); + } + + static inline bool comparePresetToCameraStateWithStrictThresholds( + const core::CCameraGoalSolver& solver, + core::ICamera* camera, + const core::CCameraPreset& preset) + { + return core::CCameraPresetFlowUtilities::comparePresetToCameraState( + solver, + camera, + preset, + SCameraSmokeComparisonThresholds::StrictPositionTolerance, + SCameraSmokeComparisonThresholds::StrictAngularToleranceDeg, + SCameraSmokeComparisonThresholds::StrictScalarTolerance); + } + + static inline bool compareKeyframeTrackContentWithStrictThresholds( + const core::CCameraKeyframeTrack& lhs, + const core::CCameraKeyframeTrack& rhs) + { + return core::CCameraKeyframeTrackUtilities::compareKeyframeTrackContent( + lhs, + rhs, + SCameraSmokeComparisonThresholds::TrackTimeTolerance, + SCameraSmokeComparisonThresholds::StrictPositionTolerance, + SCameraSmokeComparisonThresholds::StrictAngularToleranceDeg, + SCameraSmokeComparisonThresholds::StrictScalarTolerance); + } +}; + +} // namespace nbl::system + +#endif // _C_CAMERA_SMOKE_REGRESSION_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraTargetRelativeUtilities.hpp b/include/nbl/ext/Cameras/CCameraTargetRelativeUtilities.hpp new file mode 100644 index 0000000000..6270d718a0 --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraTargetRelativeUtilities.hpp @@ -0,0 +1,270 @@ +#ifndef _C_CAMERA_TARGET_RELATIVE_UTILITIES_HPP_ +#define _C_CAMERA_TARGET_RELATIVE_UTILITIES_HPP_ + +#include + +#include "SCameraRigPose.hpp" +#include "CCameraVirtualEventUtilities.hpp" + +namespace nbl::core +{ + +/// @brief Canonical target-relative orbit state used by spherical cameras, follow, and goal solving. 
+struct SCameraTargetRelativeState final +{ + hlsl::float64_t3 target = hlsl::float64_t3(0.0); + hlsl::float64_t2 orbitUv = hlsl::float64_t2(0.0); + float distance = SCameraTargetRelativeTraits::MinDistance; +}; + +/// @brief Pose reconstructed from a target-relative orbit state. +struct SCameraTargetRelativePose final : SCameraRigPose +{ + hlsl::float64_t appliedDistance = static_cast(SCameraTargetRelativeTraits::MinDistance); +}; + +/// @brief Derived basis for target-relative orbit rigs. +struct SCameraTargetRelativeBasis final +{ + hlsl::float64_t3 localOffset = hlsl::float64_t3(0.0); + hlsl::float64_t3 right = hlsl::float64_t3(1.0, 0.0, 0.0); + hlsl::float64_t3 up = hlsl::float64_t3(0.0, 0.0, 1.0); + hlsl::float64_t3 forward = hlsl::float64_t3(0.0, 1.0, 0.0); +}; + +/// @brief Delta between current spherical target state and canonical target-relative goal. +struct SCameraTargetRelativeDelta final +{ + hlsl::float64_t2 orbitUv = hlsl::float64_t2(0.0); + double distance = 0.0; + + inline hlsl::float64_t3 orbitVector() const + { + return hlsl::float64_t3(orbitUv.y, orbitUv.x, 0.0); + } +}; + +/// @brief Mapping policy describing how a target-relative delta is converted into virtual events. +struct SCameraTargetRelativeEventPolicy final +{ + bool translateOrbit = false; + bool allowYaw = true; + bool allowPitch = true; + SCameraVirtualEventAxisBinding distanceBinding = { + CVirtualGimbalEvent::MoveForward, + CVirtualGimbalEvent::MoveBackward + }; +}; + +/// @brief Default constants and event policies used by target-relative rigs. 
+struct SCameraTargetRelativeRigDefaults final +{ + static constexpr float InitialDistance = 1.0f; + static constexpr double ArcballPitchLimitRad = hlsl::SCameraViewRigDefaults::ArcballPitchLimitRad; + static constexpr double TurntablePitchLimitRad = hlsl::SCameraViewRigDefaults::TurntablePitchLimitRad; + static constexpr double ChaseMaxPitchRad = hlsl::SCameraViewRigDefaults::ChaseMaxPitchRad; + static constexpr double ChaseMinPitchRad = hlsl::SCameraViewRigDefaults::ChaseMinPitchRad; + static constexpr double DollyPitchLimitRad = hlsl::SCameraViewRigDefaults::DollyPitchLimitRad; + static constexpr double TopDownPitchRad = hlsl::SCameraViewRigDefaults::TopDownPitchRad; + static constexpr double IsometricYawRad = hlsl::SCameraViewRigDefaults::IsometricYawRad; + static inline const double IsometricPitchRad = hlsl::SCameraViewRigDefaults::IsometricPitchRad; + + static inline constexpr SCameraTargetRelativeEventPolicy OrbitTranslatePolicy = { + .translateOrbit = true + }; + static inline constexpr SCameraTargetRelativeEventPolicy RotateDistancePolicy = { + .translateOrbit = false, + .allowYaw = true, + .allowPitch = true + }; + static inline constexpr SCameraTargetRelativeEventPolicy TopDownPolicy = { + .translateOrbit = false, + .allowYaw = true, + .allowPitch = false + }; + static inline constexpr SCameraTargetRelativeEventPolicy IsometricPolicy = { + .translateOrbit = false, + .allowYaw = false, + .allowPitch = false + }; + static inline constexpr SCameraTargetRelativeEventPolicy DollyPolicy = { + .translateOrbit = false, + .allowYaw = true, + .allowPitch = true, + .distanceBinding = { + CVirtualGimbalEvent::None, + CVirtualGimbalEvent::None + } + }; + static inline constexpr SCameraTargetRelativeEventPolicy ChasePolicy = { + .translateOrbit = false, + .allowYaw = true, + .allowPitch = true, + .distanceBinding = { + CVirtualGimbalEvent::MoveUp, + CVirtualGimbalEvent::MoveDown + } + }; +}; + +/// @brief Helpers for converting between target-relative state, pose, 
basis, and virtual-event deltas. +struct CCameraTargetRelativeUtilities final +{ + static inline bool tryBuildTargetRelativeStateFromPosition( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const float minDistance, + const float maxDistance, + SCameraTargetRelativeState& outState) + { + outState = {}; + outState.target = targetPosition; + + hlsl::float64_t appliedDistance = static_cast(minDistance); + if (!hlsl::CCameraMathUtilities::tryBuildOrbitFromPosition( + targetPosition, + position, + static_cast(minDistance), + static_cast(maxDistance), + outState.orbitUv, + appliedDistance)) + { + return false; + } + + outState.distance = static_cast(appliedDistance); + return true; + } + + static inline bool tryBuildTargetRelativePoseFromState( + const SCameraTargetRelativeState& state, + const float minDistance, + const float maxDistance, + SCameraTargetRelativePose& outPose) + { + outPose = {}; + return hlsl::CCameraMathUtilities::tryBuildSphericalPoseFromOrbit( + state.target, + state.orbitUv, + static_cast(state.distance), + static_cast(minDistance), + static_cast(maxDistance), + outPose.position, + outPose.orientation, + &outPose.appliedDistance); + } + + static inline bool tryBuildTargetRelativeBasis( + const SCameraTargetRelativeState& state, + const float minDistance, + const float maxDistance, + SCameraTargetRelativeBasis& outBasis) + { + SCameraTargetRelativePose pose = {}; + if (!tryBuildTargetRelativePoseFromState(state, minDistance, maxDistance, pose)) + return false; + + outBasis.localOffset = pose.position - state.target; + const auto basis = hlsl::CCameraMathUtilities::getQuaternionBasisMatrix(pose.orientation); + outBasis.right = basis[0]; + outBasis.up = basis[1]; + outBasis.forward = basis[2]; + return true; + } + + static inline bool tryBuildTargetRelativePoseFromPosition( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const float minDistance, + const float maxDistance, + 
SCameraTargetRelativePose& outPose, + SCameraTargetRelativeState* outState = nullptr) + { + SCameraTargetRelativeState state = {}; + if (!tryBuildTargetRelativeStateFromPosition(targetPosition, position, minDistance, maxDistance, state)) + return false; + + if (!tryBuildTargetRelativePoseFromState(state, minDistance, maxDistance, outPose)) + return false; + + if (outState) + *outState = state; + return true; + } + + static inline SCameraTargetRelativeDelta buildTargetRelativeDelta( + const ICamera::SphericalTargetState& currentState, + const SCameraTargetRelativeState& desiredState) + { + return { + .orbitUv = hlsl::float64_t2( + hlsl::CCameraMathUtilities::wrapAngleRad(desiredState.orbitUv.x - currentState.orbitUv.x), + hlsl::CCameraMathUtilities::wrapAngleRad(desiredState.orbitUv.y - currentState.orbitUv.y)), + .distance = static_cast(desiredState.distance - currentState.distance) + }; + } + + static inline void appendTargetRelativeDeltaEvents( + std::vector& events, + const SCameraTargetRelativeDelta& delta, + const double angularDenominator, + const double angularToleranceDeg, + const double distanceDenominator, + const double distanceTolerance, + const SCameraTargetRelativeEventPolicy& policy) + { + if (policy.translateOrbit) + { + CCameraVirtualEventUtilities::appendAngularAxisEvents( + events, + delta.orbitVector(), + hlsl::float64_t3(angularDenominator), + hlsl::float64_t3(angularToleranceDeg, angularToleranceDeg, std::numeric_limits::infinity()), + {{ + { CVirtualGimbalEvent::MoveRight, CVirtualGimbalEvent::MoveLeft }, + { CVirtualGimbalEvent::MoveUp, CVirtualGimbalEvent::MoveDown }, + { CVirtualGimbalEvent::None, CVirtualGimbalEvent::None } + }}); + } + else + { + if (policy.allowYaw) + { + CCameraVirtualEventUtilities::appendAngularDeltaEvent( + events, + delta.orbitUv.x, + angularDenominator, + angularToleranceDeg, + CVirtualGimbalEvent::PanRight, + CVirtualGimbalEvent::PanLeft); + } + if (policy.allowPitch) + { + 
CCameraVirtualEventUtilities::appendAngularDeltaEvent( + events, + delta.orbitUv.y, + angularDenominator, + angularToleranceDeg, + CVirtualGimbalEvent::TiltUp, + CVirtualGimbalEvent::TiltDown); + } + } + + if (policy.distanceBinding.positive != CVirtualGimbalEvent::None && + policy.distanceBinding.negative != CVirtualGimbalEvent::None) + { + CCameraVirtualEventUtilities::appendScaledVirtualEvent( + events, + delta.distance, + distanceDenominator, + distanceTolerance, + policy.distanceBinding.positive, + policy.distanceBinding.negative); + } + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_TARGET_RELATIVE_UTILITIES_HPP_ + diff --git a/include/nbl/ext/Cameras/CCameraTextUtilities.hpp b/include/nbl/ext/Cameras/CCameraTextUtilities.hpp new file mode 100644 index 0000000000..7d2422dd3d --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraTextUtilities.hpp @@ -0,0 +1,210 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_TEXT_UTILITIES_HPP_ +#define _C_CAMERA_TEXT_UTILITIES_HPP_ + +#include +#include +#include + +#include "CCameraFollowUtilities.hpp" +#include "CCameraGoalAnalysis.hpp" +#include "CCameraPresetFlow.hpp" + +namespace nbl::ui +{ + +struct CCameraTextUtilities final +{ +public: + /// @brief Return a short human-readable label for a camera kind. + static inline std::string_view getCameraTypeLabel(const core::ICamera::CameraKind kind) + { + return core::CCameraKindUtilities::getCameraKindLabel(kind); + } + + /// @brief Return a short human-readable label for a concrete camera instance. + static inline std::string_view getCameraTypeLabel(const core::ICamera* camera) + { + return camera ? getCameraTypeLabel(camera->getKind()) : "Unknown"; + } + + /// @brief Return a short human-readable description for a camera kind. 
+ static inline std::string_view getCameraTypeDescription(const core::ICamera::CameraKind kind) + { + return core::CCameraKindUtilities::getCameraKindDescription(kind); + } + + /// @brief Return a short human-readable description for a concrete camera instance. + static inline std::string_view getCameraTypeDescription(const core::ICamera* camera) + { + return camera ? getCameraTypeDescription(camera->getKind()) : "Unspecified camera behavior"; + } + + /// @brief Return a short human-readable label for a follow mode. + static inline constexpr const char* getCameraFollowModeLabel(const core::ECameraFollowMode mode) + { + switch (mode) + { + case core::ECameraFollowMode::Disabled: return "Disabled"; + case core::ECameraFollowMode::OrbitTarget: return "Orbit target"; + case core::ECameraFollowMode::LookAtTarget: return "Look at target"; + case core::ECameraFollowMode::KeepWorldOffset: return "Keep world offset"; + case core::ECameraFollowMode::KeepLocalOffset: return "Keep local offset"; + default: return "Unknown"; + } + } + + /// @brief Return a short human-readable description for a follow mode. + static inline constexpr const char* getCameraFollowModeDescription(const core::ECameraFollowMode mode) + { + switch (mode) + { + case core::ECameraFollowMode::Disabled: return "Follow disabled"; + case core::ECameraFollowMode::OrbitTarget: return "Keep orbit around moving target and keep it centered"; + case core::ECameraFollowMode::LookAtTarget: return "Keep camera position and lock the view onto the target"; + case core::ECameraFollowMode::KeepWorldOffset: return "Move with the target in world offset and keep it centered"; + case core::ECameraFollowMode::KeepLocalOffset: return "Move with the target in target-local offset and keep it centered"; + default: return "Unknown follow mode"; + } + } + + /// @brief Describe the typed goal-state mask in a stable human-readable format. 
+ static inline std::string describeGoalStateMask(const core::ICamera::goal_state_flags_t mask) + { + if (mask == core::ICamera::GoalStateNone) + return "Pose only"; + + std::string out; + auto append = [&](const char* label, const core::ICamera::GoalStateMask bit) -> void + { + if (!mask.hasFlags(bit)) + return; + if (!out.empty()) + out += ", "; + out += label; + }; + + append("Spherical target", core::ICamera::GoalStateSphericalTarget); + append("Dynamic perspective", core::ICamera::GoalStateDynamicPerspective); + append("Path rig state", core::ICamera::GoalStatePath); + return out; + } + + /// @brief Describe a detailed goal-apply result for logs, smoke tests, and UI summaries. + static inline std::string describeApplyResult(const core::CCameraGoalSolver::SApplyResult& result) + { + std::ostringstream oss; + oss << "status="; + switch (result.status) + { + case core::CCameraGoalSolver::SApplyResult::EStatus::Unsupported: oss << "Unsupported"; break; + case core::CCameraGoalSolver::SApplyResult::EStatus::Failed: oss << "Failed"; break; + case core::CCameraGoalSolver::SApplyResult::EStatus::AlreadySatisfied: oss << "AlreadySatisfied"; break; + case core::CCameraGoalSolver::SApplyResult::EStatus::AppliedAbsoluteOnly: oss << "AppliedAbsoluteOnly"; break; + case core::CCameraGoalSolver::SApplyResult::EStatus::AppliedVirtualEvents: oss << "AppliedVirtualEvents"; break; + case core::CCameraGoalSolver::SApplyResult::EStatus::AppliedAbsoluteAndVirtualEvents: oss << "AppliedAbsoluteAndVirtualEvents"; break; + } + oss << " exact=" << (result.exact ? 
"true" : "false") + << " events=" << result.eventCount; + + if (result.issues != core::CCameraGoalSolver::SApplyResult::EIssue::NoIssue) + { + oss << " issues="; + bool first = true; + auto appendIssue = [&](const char* label, const core::CCameraGoalSolver::SApplyResult::EIssue issue) -> void + { + if (!result.hasIssue(issue)) + return; + if (!first) + oss << ","; + oss << label; + first = false; + }; + + appendIssue("absolute_pose_fallback", core::CCameraGoalSolver::SApplyResult::EIssue::UsedAbsolutePoseFallback); + appendIssue("missing_spherical_state", core::CCameraGoalSolver::SApplyResult::EIssue::MissingSphericalTargetState); + appendIssue("missing_path_state", core::CCameraGoalSolver::SApplyResult::EIssue::MissingPathState); + appendIssue("missing_dynamic_perspective_state", core::CCameraGoalSolver::SApplyResult::EIssue::MissingDynamicPerspectiveState); + appendIssue("virtual_event_replay_failed", core::CCameraGoalSolver::SApplyResult::EIssue::VirtualEventReplayFailed); + } + + return oss.str(); + } + + /// @brief Describe compatibility preview for applying one analyzed goal to a target camera. + static inline std::string describeGoalApplyCompatibility(const core::SCameraGoalApplyAnalysis& analysis, const core::ICamera* targetCamera) + { + if (!analysis.hasCamera) + return "No active camera"; + + std::ostringstream oss; + oss << (analysis.compatibility.exact ? "Exact" : "Best-effort") + << " | source=" << getCameraTypeLabel(analysis.goal.sourceKind) + << " | target=" << getCameraTypeLabel(targetCamera); + + if (analysis.compatibility.missingGoalStateMask != core::ICamera::GoalStateNone) + oss << " | missing=" << describeGoalStateMask(analysis.compatibility.missingGoalStateMask); + else if (!analysis.compatibility.sameKind && analysis.goal.sourceKind != core::ICamera::CameraKind::Unknown) + oss << " | shared goal state only"; + + return oss.str(); + } + + /// @brief Describe whether an analyzed goal can be meaningfully applied to the target camera. 
+ static inline std::string describeGoalApplyPolicy(const core::SCameraGoalApplyAnalysis& analysis) + { + if (!analysis.hasCamera) + return "Blocked | no active camera"; + if (!analysis.finiteGoal) + return "Blocked | invalid goal state"; + + std::ostringstream oss; + oss << (analysis.compatibility.exact ? "Exact apply" : "Best-effort apply"); + if (analysis.compatibility.missingGoalStateMask != core::ICamera::GoalStateNone) + oss << " | drops=" << describeGoalStateMask(analysis.compatibility.missingGoalStateMask); + else if (!analysis.compatibility.sameKind && analysis.goal.sourceKind != core::ICamera::CameraKind::Unknown) + oss << " | shared goal state only"; + else + oss << " | full preview available"; + + return oss.str(); + } + + /// @brief Describe whether one analyzed camera state can be captured into a reusable goal. + static inline std::string describeCameraCapturePolicy(const core::SCameraCaptureAnalysis& analysis, const core::ICamera* camera) + { + if (!analysis.hasCamera) + return "Blocked | no active camera"; + if (!analysis.capturedGoal) + return "Blocked | goal capture failed"; + if (!analysis.finiteGoal) + return "Blocked | invalid goal state"; + + std::ostringstream oss; + oss << "Ready | source=" << getCameraTypeLabel(camera) + << " | goal=" << describeGoalStateMask(analysis.goal.sourceGoalStateMask); + return oss.str(); + } + + /// @brief Describe the aggregate outcome of applying one preset to multiple cameras. 
+ static inline std::string describePresetApplySummary(const core::SCameraPresetApplySummary& summary, std::string_view noTargetsLabel, std::string_view prefix = "Playback apply") + { + if (!summary.hasTargets()) + return std::string(noTargetsLabel); + + std::ostringstream oss; + oss << prefix << " | targets=" << summary.targetCount << " | ok=" << summary.successCount; + if (summary.approximateCount > 0u) + oss << " | approximate=" << summary.approximateCount; + if (summary.failureCount > 0u) + oss << " | failed=" << summary.failureCount; + return oss.str(); + } +}; + +} // namespace nbl::ui + +#endif // _C_CAMERA_TEXT_UTILITIES_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraTraits.hpp b/include/nbl/ext/Cameras/CCameraTraits.hpp new file mode 100644 index 0000000000..a333c53eed --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraTraits.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_CAMERA_TRAITS_HPP_ +#define _C_CAMERA_TRAITS_HPP_ + +#include + +namespace nbl::core +{ + +/// @brief Geometric constants used by target-relative camera families. +/// +/// `MinDistance` prevents zero-distance target-relative states. +/// `DefaultMaxDistance` is unbounded. Individual cameras and tools may apply +/// their own finite limits on top of it. +struct SCameraTargetRelativeTraits final +{ + /// @brief Smallest valid target-relative distance shared by spherical and path-style rigs. + /// This is a semantic lower bound used to avoid zero-distance degeneracy, not + /// the minimum representable `float` value. + static inline constexpr float MinDistance = 1e-1f; + /// @brief Default upper bound for target-relative distance when no camera-specific cap is requested. 
+ static inline constexpr float DefaultMaxDistance = std::numeric_limits::infinity(); +}; + +/// @brief Comparison thresholds used by helper layers outside the runtime camera interface. +struct SCameraToolingThresholds final +{ + /// @brief Default scalar tolerance used by typed state comparisons. + static inline constexpr double ScalarTolerance = 1e-6; + /// @brief Small epsilon used by replay and comparison helpers that need stricter zero tests. + static inline constexpr double TinyScalarEpsilon = 1e-9; + /// @brief Default world-space position tolerance used by pose comparisons. + static inline constexpr double DefaultPositionTolerance = 2.0 * ScalarTolerance; + /// @brief Default angular tolerance in degrees used by pose and state comparisons. + static inline constexpr double DefaultAngularToleranceDeg = 1e-1; +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_TRAITS_HPP_ diff --git a/include/nbl/ext/Cameras/CCameraVirtualEventUtilities.hpp b/include/nbl/ext/Cameras/CCameraVirtualEventUtilities.hpp new file mode 100644 index 0000000000..31baae246e --- /dev/null +++ b/include/nbl/ext/Cameras/CCameraVirtualEventUtilities.hpp @@ -0,0 +1,188 @@ +#ifndef _C_CAMERA_VIRTUAL_EVENT_UTILITIES_HPP_ +#define _C_CAMERA_VIRTUAL_EVENT_UTILITIES_HPP_ + +#include +#include +#include + +#include "CCameraMathUtilities.hpp" +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Positive and negative semantic virtual-event pair for one scalar axis. +struct SCameraVirtualEventAxisBinding final +{ + CVirtualGimbalEvent::VirtualEventType positive = CVirtualGimbalEvent::None; + CVirtualGimbalEvent::VirtualEventType negative = CVirtualGimbalEvent::None; +}; + +/// @brief Reusable axis-binding presets shared by helpers that synthesize virtual events. 
+struct SCameraVirtualEventBindings final +{ + static inline constexpr std::array LocalTranslation = {{ + { CVirtualGimbalEvent::MoveRight, CVirtualGimbalEvent::MoveLeft }, + { CVirtualGimbalEvent::MoveUp, CVirtualGimbalEvent::MoveDown }, + { CVirtualGimbalEvent::MoveForward, CVirtualGimbalEvent::MoveBackward } + }}; +}; + +/// @brief Shared helpers for building and analyzing `CVirtualGimbalEvent` batches. +/// +/// These utilities are reused by goal replay, path-model control translation, +/// scripted tooling, and smoke checks whenever code needs to convert typed deltas +/// into semantic event streams or inspect those streams on the CPU. +struct CCameraVirtualEventUtilities final +{ +public: + /// @brief Append one signed scalar as either the positive or negative event variant. + static inline void appendSignedVirtualEvent( + std::vector& events, + const double value, + const CVirtualGimbalEvent::VirtualEventType positive, + const CVirtualGimbalEvent::VirtualEventType negative, + const double tolerance = static_cast(SCameraToolingThresholds::TinyScalarEpsilon)) + { + if (!hlsl::CCameraMathUtilities::isFiniteScalar(value) || hlsl::CCameraMathUtilities::isNearlyZeroScalar(value, tolerance)) + return; + + auto& ev = events.emplace_back(); + ev.type = (value > 0.0) ? positive : negative; + ev.magnitude = hlsl::abs(value); + } + + /// @brief Append one signed scalar after normalizing it by a caller-provided denominator. 
+ static inline void appendScaledVirtualEvent( + std::vector& events, + const double value, + const double denominator, + const double tolerance, + const CVirtualGimbalEvent::VirtualEventType positive, + const CVirtualGimbalEvent::VirtualEventType negative) + { + if (!hlsl::CCameraMathUtilities::isFiniteScalar(denominator) || hlsl::CCameraMathUtilities::isNearlyZeroScalar(denominator, static_cast(SCameraToolingThresholds::TinyScalarEpsilon))) + return; + + appendSignedVirtualEvent(events, value / denominator, positive, negative, tolerance); + } + + /// @brief Append one angular delta by comparing it against a tolerance expressed in degrees. + static inline void appendAngularDeltaEvent( + std::vector& events, + const double deltaRadians, + const double denominator, + const double toleranceDeg, + const CVirtualGimbalEvent::VirtualEventType positive, + const CVirtualGimbalEvent::VirtualEventType negative) + { + if (!hlsl::CCameraMathUtilities::isFiniteScalar(deltaRadians) || + hlsl::CCameraMathUtilities::isNearlyZeroScalar(hlsl::degrees(deltaRadians), toleranceDeg)) + { + return; + } + + appendScaledVirtualEvent( + events, + deltaRadians, + denominator, + hlsl::radians(toleranceDeg), + positive, + negative); + } + + /// @brief Append one 3-axis scalar bundle through a caller-provided binding table. + static inline void appendScaledVirtualAxisEvents( + std::vector& events, + const hlsl::float64_t3& values, + const hlsl::float64_t3& denominators, + const hlsl::float64_t3& tolerances, + const std::array& axisBindings) + { + for (size_t axisIx = 0u; axisIx < axisBindings.size(); ++axisIx) + { + appendScaledVirtualEvent( + events, + values[axisIx], + denominators[axisIx], + tolerances[axisIx], + axisBindings[axisIx].positive, + axisBindings[axisIx].negative); + } + } + + /// @brief Append a local-space translation delta as semantic move events. 
+ static inline void appendLocalTranslationEvents( + std::vector& events, + const hlsl::float64_t3& localDelta, + const hlsl::float64_t3& denominators = hlsl::float64_t3(1.0), + const hlsl::float64_t3& tolerances = hlsl::float64_t3(SCameraToolingThresholds::TinyScalarEpsilon)) + { + appendScaledVirtualAxisEvents( + events, + localDelta, + denominators, + tolerances, + SCameraVirtualEventBindings::LocalTranslation); + } + + /// @brief Reinterpret a world-space translation delta in the local frame of a camera orientation. + static inline void appendWorldTranslationAsLocalEvents( + std::vector& events, + const hlsl::camera_quaternion_t& orientation, + const hlsl::float64_t3& worldDelta, + const hlsl::float64_t3& denominators = hlsl::float64_t3(1.0), + const hlsl::float64_t3& tolerances = hlsl::float64_t3(SCameraToolingThresholds::TinyScalarEpsilon)) + { + appendLocalTranslationEvents( + events, + hlsl::CCameraMathUtilities::projectWorldVectorToLocalQuaternionFrame(orientation, worldDelta), + denominators, + tolerances); + } + + /// @brief Append one 3-axis angular delta through a caller-provided binding table. + static inline void appendAngularAxisEvents( + std::vector& events, + const hlsl::float64_t3& deltaRadians, + const hlsl::float64_t3& denominators, + const hlsl::float64_t3& toleranceDeg, + const std::array& axisBindings) + { + for (size_t axisIx = 0u; axisIx < axisBindings.size(); ++axisIx) + { + appendAngularDeltaEvent( + events, + deltaRadians[axisIx], + denominators[axisIx], + toleranceDeg[axisIx], + axisBindings[axisIx].positive, + axisBindings[axisIx].negative); + } + } + + /// @brief Accumulate only translation-related virtual events back into a signed delta vector. 
+ static inline hlsl::float64_t3 collectSignedTranslationDelta(std::span events) + { + hlsl::float64_t3 delta = hlsl::float64_t3(0.0); + for (const auto& ev : events) + { + switch (ev.type) + { + case CVirtualGimbalEvent::MoveRight: delta.x += ev.magnitude; break; + case CVirtualGimbalEvent::MoveLeft: delta.x -= ev.magnitude; break; + case CVirtualGimbalEvent::MoveUp: delta.y += ev.magnitude; break; + case CVirtualGimbalEvent::MoveDown: delta.y -= ev.magnitude; break; + case CVirtualGimbalEvent::MoveForward: delta.z += ev.magnitude; break; + case CVirtualGimbalEvent::MoveBackward: delta.z -= ev.magnitude; break; + default: break; + } + } + return delta; + } +}; + +} // namespace nbl::core + +#endif // _C_CAMERA_VIRTUAL_EVENT_UTILITIES_HPP_ + diff --git a/include/nbl/ext/Cameras/CChaseCamera.hpp b/include/nbl/ext/Cameras/CChaseCamera.hpp new file mode 100644 index 0000000000..36d269e077 --- /dev/null +++ b/include/nbl/ext/Cameras/CChaseCamera.hpp @@ -0,0 +1,89 @@ +#ifndef _C_CHASE_CAMERA_HPP_ +#define _C_CHASE_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera with planar target translation on the ground plane. +/// +/// Translation is resolved in a planar forward/right frame derived from the +/// current orbit basis. Rotation updates orbit yaw and pitch. Distance remains +/// clamped to the chase-camera limits. +class CChaseCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CChaseCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv.y = std::clamp(m_orbitUv.y, MinPitch, MaxPitch); + applyPose(); + } + ~CChaseCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply chase-style planar translation, pitch/yaw orbiting, and distance changes. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + resolvedState.orbitUv.y = std::clamp(resolvedState.orbitUv.y, MinPitch, MaxPitch); + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const auto deltaRotation = scaleVirtualRotation(impulse.dVirtualRotation); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const auto deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.y); + + const auto basis = computeBasis(m_orbitUv, m_distance); + + const auto planarForward = hlsl::CCameraMathUtilities::safeNormalizeVec3( + hlsl::float64_t3(basis.forward.x, 0.0, basis.forward.z), + hlsl::float64_t3(0.0, 0.0, 1.0)); + const auto planarRight = hlsl::CCameraMathUtilities::safeNormalizeVec3( + hlsl::float64_t3(basis.right.x, 0.0, basis.right.z), + hlsl::float64_t3(1.0, 0.0, 0.0)); + + m_targetPosition += planarRight * deltaTranslation.x + planarForward * deltaTranslation.z; + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + m_orbitUv.x += deltaRotation.y; + m_orbitUv.y = std::clamp(m_orbitUv.y + deltaRotation.x, MinPitch, MaxPitch); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Chase; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Chase Camera"; } + +private: + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr double MaxPitch = SCameraTargetRelativeRigDefaults::ChaseMaxPitchRad; + static inline constexpr double MinPitch = SCameraTargetRelativeRigDefaults::ChaseMinPitchRad; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CCubeProjection.hpp b/include/nbl/ext/Cameras/CCubeProjection.hpp new file mode 100644 index 0000000000..7017e49e00 --- /dev/null +++ b/include/nbl/ext/Cameras/CCubeProjection.hpp @@ -0,0 +1,97 @@ +#ifndef _NBL_CCUBE_PROJECTION_HPP_ +#define _NBL_CCUBE_PROJECTION_HPP_ + +#include "IRange.hpp" +#include "IPerspectiveProjection.hpp" + +namespace nbl::core +{ + +/// @brief Projection where each cube face is a perspective quad. +/// +/// This represents a cube projection for a direction vector where each face of +/// the cube is treated as a quad. Projection onto the cube is done through +/// those quads, each with its own pre-transform and viewport linear matrix. +class CCubeProjection final : public IPerspectiveProjection, public IProjection +{ +public: + /// @brief Represents six face identifiers of a cube. 
+ enum CubeFaces : uint8_t + { + /// @brief Cube face in the +X base direction + PositiveX = 0, + + /// @brief Cube face in the -X base direction + NegativeX, + + /// @brief Cube face in the +Y base direction + PositiveY, + + /// @brief Cube face in the -Y base direction + NegativeY, + + /// @brief Cube face in the +Z base direction + PositiveZ, + + /// @brief Cube face in the -Z base direction + NegativeZ, + + CubeFacesCount + }; + + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CCubeProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + virtual uint32_t getLinearProjectionCount() const override + { + return static_cast(m_quads.size()); + } + + virtual const ILinearProjection::CProjection& getLinearProjection(uint32_t index) const override + { + assert(index < m_quads.size()); + return m_quads[index]; + } + + void transformCube() + { + // Cube-face quad generation is not implemented yet. + } + + virtual ProjectionType getProjectionType() const override { return ProjectionType::Cube; } + + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const override + { + auto direction = hlsl::normalize(vecToProjectionSpace); + + // Cube-face projection is not implemented yet. + } + + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const override + { + // Reverse projection is not implemented yet. 
+ } + + template + requires (FaceIx != CubeFacesCount) + inline const CProjection& getProjectionQuad() + { + return m_quads[FaceIx]; + } + +private: + CCubeProjection(core::smart_refctd_ptr&& camera) + : IPerspectiveProjection(core::smart_refctd_ptr(camera)) {} + virtual ~CCubeProjection() = default; + + std::array m_quads; +}; + +} // namespace nbl::core + +#endif // _NBL_CCUBE_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/CDollyCamera.hpp b/include/nbl/ext/Cameras/CDollyCamera.hpp new file mode 100644 index 0000000000..043c656103 --- /dev/null +++ b/include/nbl/ext/Cameras/CDollyCamera.hpp @@ -0,0 +1,80 @@ +#ifndef _C_DOLLY_CAMERA_HPP_ +#define _C_DOLLY_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera that translates the target in the full local camera basis. +/// +/// Translation uses the current right/up/forward basis. Rotation updates orbit +/// yaw and pitch while the camera pose is rebuilt from the maintained +/// target-relative offset. +class CDollyCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CDollyCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv.y = std::clamp(m_orbitUv.y, MinPitch, MaxPitch); + applyPose(); + } + ~CDollyCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of local-frame dolly translation plus orbit rotation. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + resolvedState.orbitUv.y = std::clamp(resolvedState.orbitUv.y, MinPitch, MaxPitch); + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const auto deltaRotation = scaleVirtualRotation(impulse.dVirtualRotation); + + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const auto basis = computeBasis(m_orbitUv, m_distance); + const auto delta = hlsl::CCameraMathUtilities::transformLocalVectorToWorldBasis(deltaTranslation, basis.right, basis.up, basis.forward); + + m_targetPosition += delta; + m_orbitUv.x += deltaRotation.y; + m_orbitUv.y = std::clamp(m_orbitUv.y + deltaRotation.x, MinPitch, MaxPitch); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Dolly; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Dolly Camera"; } + +private: + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr double MaxPitch = SCameraTargetRelativeRigDefaults::DollyPitchLimitRad; + static inline constexpr double MinPitch = -MaxPitch; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CDollyZoomCamera.hpp b/include/nbl/ext/Cameras/CDollyZoomCamera.hpp new file mode 100644 index 0000000000..86e16a3ded --- /dev/null +++ b/include/nbl/ext/Cameras/CDollyZoomCamera.hpp @@ -0,0 +1,122 @@ +#ifndef _C_DOLLY_ZOOM_CAMERA_HPP_ +#define _C_DOLLY_ZOOM_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera that preserves subject framing by coupling distance with a derived perspective FOV. +/// +/// The rig reuses spherical target-relative manipulation but exposes an additional +/// dynamic-perspective state describing the authored base FOV and the reference +/// distance used to compute the current dolly-zoom FOV. +class CDollyZoomCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CDollyZoomCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target, float baseFov = DefaultBaseFovDeg) + : base_t(position, target), m_baseFov(baseFov), m_referenceDistance(m_distance) + { + applyPose(); + } + ~CDollyZoomCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Return the authored FOV used as the reference value for dolly-zoom evaluation. + float getBaseFov() const { return m_baseFov; } + /// @brief Update the authored reference FOV used for dolly-zoom evaluation. + void setBaseFov(float fov) { m_baseFov = fov; } + + /// @brief Return the reference distance that preserves the authored framing. 
+ float getReferenceDistance() const { return m_referenceDistance; } + /// @brief Update the reference distance used by dolly-zoom FOV evaluation. + void setReferenceDistance(float distance) { m_referenceDistance = distance; } + + /// @brief Evaluate the effective perspective FOV required to preserve subject framing at the current distance. + float computeDollyFov() const + { + const double base = std::tan(hlsl::radians(static_cast(m_baseFov)) * 0.5); + const double ratio = static_cast(m_referenceDistance) / std::max(static_cast(m_distance), static_cast(MinDistance)); + const double fov = 2.0 * std::atan(base * ratio); + const double fovDeg = hlsl::degrees(fov); + return static_cast(std::clamp(fovDeg, static_cast(MinDynamicFovDeg), static_cast(MaxDynamicFovDeg))); + } + + /// @brief Apply one frame of orbit translation and distance input for the dolly-zoom rig. + virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv += hlsl::float64_t2(deltaTranslation.y, deltaTranslation.x); + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::DollyZoom; } + 
virtual uint32_t getCapabilities() const override { return base_t::getCapabilities() | base_t::DynamicPerspectiveFov; } + /// @brief Query the current derived FOV produced by the dolly-zoom state. + virtual bool tryGetDynamicPerspectiveFov(float& outFov) const override + { + outFov = computeDollyFov(); + return true; + } + /// @brief Query the authored dolly-zoom state used to derive the current dynamic FOV. + virtual bool tryGetDynamicPerspectiveState(DynamicPerspectiveState& out) const override + { + out.baseFov = m_baseFov; + out.referenceDistance = m_referenceDistance; + return true; + } + /// @brief Replace the authored dolly-zoom state after validating both scalars. + virtual bool trySetDynamicPerspectiveState(const DynamicPerspectiveState& state) override + { + if (!hlsl::CCameraMathUtilities::isFiniteScalar(state.baseFov) || !hlsl::CCameraMathUtilities::isFiniteScalar(state.referenceDistance) || state.referenceDistance <= 0.f) + return false; + + m_baseFov = state.baseFov; + m_referenceDistance = state.referenceDistance; + return true; + } + /// @brief Return the stable user-facing identifier for this concrete camera kind. + virtual std::string_view getIdentifier() const override { return "Dolly Zoom Camera"; } + +private: + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate; + static inline constexpr float DefaultBaseFovDeg = 40.0f; + static inline constexpr float MinDynamicFovDeg = 10.0f; + static inline constexpr float MaxDynamicFovDeg = 150.0f; + + float m_baseFov = DefaultBaseFovDeg; + float m_referenceDistance = 1.0f; +}; + +} + +#endif + diff --git a/include/nbl/ext/Cameras/CFPSCamera.hpp b/include/nbl/ext/Cameras/CFPSCamera.hpp new file mode 100644 index 0000000000..8e0735ad52 --- /dev/null +++ b/include/nbl/ext/Cameras/CFPSCamera.hpp @@ -0,0 +1,126 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_FPS_CAMERA_HPP_ +#define _C_FPS_CAMERA_HPP_ + +#include + +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Free-position camera with world-space translation and yaw/pitch rotation. +/// +/// The runtime state consists of position plus an upright orientation derived +/// from yaw and pitch. Reference-frame application rejects arbitrary roll and +/// rebuilds the legal FPS orientation from the extracted forward axis. +class CFPSCamera final : public ICamera +{ +public: + using base_t = ICamera; + struct SFpsCameraDefaults final + { + static inline constexpr float RollValidationEpsilonDeg = 1.e-4f; + static inline constexpr float StraightRollDeg = 0.0f; + static inline constexpr float InvertedRollDeg = 180.0f; + }; + + CFPSCamera(const hlsl::float64_t3& position, const hlsl::camera_quaternion_t& orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion()) + : base_t(), m_gimbal(typename base_t::CGimbal::base_t::SCreationParameters{ .position = position, .orientation = orientation }) + { + m_gimbal.begin(); + { + const auto pitchYaw = hlsl::CCameraMathUtilities::getPitchYawFromForwardVector(m_gimbal.getZAxis()); + m_gimbal.setOrientation(hlsl::CCameraMathUtilities::makeQuaternionFromEulerRadiansYXZ(hlsl::float64_t3(pitchYaw.x, pitchYaw.y, 0.0))); + } + m_gimbal.end(); + } + ~CFPSCamera() = default; + + const typename base_t::CGimbal& getGimbal() override + { + return m_gimbal; + } + + virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + CReferenceTransform reference; + if (not m_gimbal.extractReferenceTransform(&reference, referenceFrame)) + return false; + + auto validateReference = [&]() + { + if (referenceFrame) + { + const float roll = 
static_cast(hlsl::degrees(hlsl::CCameraMathUtilities::getQuaternionEulerRadiansYXZ(reference.orientation).z)); + const bool matchesStraightRoll = + hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(roll, SFpsCameraDefaults::StraightRollDeg) <= SFpsCameraDefaults::RollValidationEpsilonDeg; + const bool matchesInvertedRoll = + hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(roll, SFpsCameraDefaults::InvertedRollDeg) <= SFpsCameraDefaults::RollValidationEpsilonDeg; + + if (!(matchesStraightRoll || matchesInvertedRoll)) + return false; + } + + return true; + }; + + auto impulse = m_gimbal.accumulate(virtualEvents); + + bool manipulated = true; + + m_gimbal.begin(); + { + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const auto pitchYaw = hlsl::CCameraMathUtilities::getPitchYawFromForwardVector(hlsl::float64_t3(reference.frame[2])); + const float newPitch = std::clamp(static_cast(pitchYaw.x + scaleVirtualRotation(impulse.dVirtualRotation.x)), MinVerticalAngle, MaxVerticalAngle); + const float newYaw = static_cast(pitchYaw.y + scaleVirtualRotation(impulse.dVirtualRotation.y)); + + if (validateReference()) + m_gimbal.setOrientation(hlsl::CCameraMathUtilities::makeQuaternionFromEulerRadiansYXZ(hlsl::float64_t3(newPitch, newYaw, 0.0f))); + m_gimbal.setPosition(hlsl::float64_t3(reference.frame[3]) + hlsl::CCameraMathUtilities::rotateVectorByQuaternion(reference.orientation, hlsl::float64_t3(deltaTranslation))); + } + m_gimbal.end(); + + manipulated &= bool(m_gimbal.getManipulationCounter()); + + if (manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + virtual uint32_t getAllowedVirtualEvents() const override + { + return AllowedVirtualEvents; + } + + virtual CameraKind getKind() const override + { + return CameraKind::FPS; + } + + virtual std::string_view getIdentifier() const override + { + return "FPS Camera"; + } + +private: + + typename base_t::CGimbal m_gimbal; + + static inline constexpr auto 
AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr float MaxVerticalAngle = static_cast(hlsl::SCameraViewRigDefaults::FpsVerticalPitchLimitRad); + static inline constexpr float MinVerticalAngle = -MaxVerticalAngle; +}; + +} + +#endif // _C_FPS_CAMERA_HPP_ + diff --git a/include/nbl/ext/Cameras/CFreeLockCamera.hpp b/include/nbl/ext/Cameras/CFreeLockCamera.hpp new file mode 100644 index 0000000000..40a4968c8c --- /dev/null +++ b/include/nbl/ext/Cameras/CFreeLockCamera.hpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2024 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_FREE_CAMERA_HPP_ +#define _C_FREE_CAMERA_HPP_ + +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Free-position camera that allows full yaw/pitch/roll rotation. +class CFreeCamera final : public ICamera +{ +public: + using base_t = ICamera; + + CFreeCamera(const hlsl::float64_t3& position, const hlsl::camera_quaternion_t& orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion()) + : base_t(), m_gimbal(typename base_t::CGimbal::base_t::SCreationParameters{ .position = position, .orientation = orientation }) {} + ~CFreeCamera() = default; + + const typename base_t::CGimbal& getGimbal() override + { + return m_gimbal; + } + + virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + CReferenceTransform reference; + if (not m_gimbal.extractReferenceTransform(&reference, referenceFrame)) + return false; + + auto impulse = m_gimbal.accumulate(virtualEvents); + + bool manipulated = true; + + m_gimbal.begin(); + { + const auto deltaRotation = scaleVirtualRotation(impulse.dVirtualRotation); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const 
auto pitch = hlsl::CCameraMathUtilities::makeQuaternionFromAxisAngle(hlsl::normalize(hlsl::float64_t3(reference.frame[0])), deltaRotation.x); + const auto yaw = hlsl::CCameraMathUtilities::makeQuaternionFromAxisAngle(hlsl::normalize(hlsl::float64_t3(reference.frame[1])), deltaRotation.y); + const auto roll = hlsl::CCameraMathUtilities::makeQuaternionFromAxisAngle(hlsl::normalize(hlsl::float64_t3(reference.frame[2])), deltaRotation.z); + + m_gimbal.setOrientation(hlsl::CCameraMathUtilities::normalizeQuaternion(yaw * pitch * roll * reference.orientation)); + m_gimbal.setPosition(hlsl::float64_t3(reference.frame[3]) + hlsl::CCameraMathUtilities::rotateVectorByQuaternion(reference.orientation, hlsl::float64_t3(deltaTranslation))); + } + m_gimbal.end(); + + manipulated &= bool(m_gimbal.getManipulationCounter()); + + if (manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + virtual uint32_t getAllowedVirtualEvents() const override + { + return AllowedVirtualEvents; + } + + virtual CameraKind getKind() const override + { + return CameraKind::Free; + } + + virtual std::string_view getIdentifier() const override + { + return "Free-Look Camera"; + } + +private: + typename base_t::CGimbal m_gimbal; + + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; +}; + +} + +#endif // _C_FREE_CAMERA_HPP_ diff --git a/include/nbl/ext/Cameras/CGeneralPurposeGimbal.hpp b/include/nbl/ext/Cameras/CGeneralPurposeGimbal.hpp new file mode 100644 index 0000000000..d0f7c72b92 --- /dev/null +++ b/include/nbl/ext/Cameras/CGeneralPurposeGimbal.hpp @@ -0,0 +1,24 @@ +#ifndef _NBL_CGENERAL_PURPOSE_GIMBAL_HPP_ +#define _NBL_CGENERAL_PURPOSE_GIMBAL_HPP_ + +#include "IGimbal.hpp" + +namespace nbl::core +{ + /// @brief Minimal concrete gimbal wrapper for code that only needs the generic `IGimbal` behavior. 
+ /// + /// The class exists mainly as a convenient instantiable type when no additional + /// camera-specific state or manipulation policy is required on top of `IGimbal`. + template + class CGeneralPurposeGimbal : public IGimbal + { + public: + using base_t = IGimbal; + + /// @brief Construct the gimbal from an initial world-space pose. + CGeneralPurposeGimbal(typename base_t::SCreationParameters&& parameters) : base_t(std::move(parameters)) {} + ~CGeneralPurposeGimbal() = default; + }; +} + +#endif // _NBL_CGENERAL_PURPOSE_GIMBAL_HPP_ diff --git a/include/nbl/ext/Cameras/CGimbalInputBinder.hpp b/include/nbl/ext/Cameras/CGimbalInputBinder.hpp new file mode 100644 index 0000000000..935ad5dcf0 --- /dev/null +++ b/include/nbl/ext/Cameras/CGimbalInputBinder.hpp @@ -0,0 +1,65 @@ +#ifndef _NBL_C_GIMBAL_INPUT_BINDER_HPP_ +#define _NBL_C_GIMBAL_INPUT_BINDER_HPP_ + +#include + +#include "IGimbalInputProcessor.hpp" + +namespace nbl::ui +{ + +/// @brief High-level runtime binder for consumers and viewport glue. +/// +/// It owns active runtime mappings and collects one frame of virtual events +/// from raw keyboard, mouse, and ImGuizmo input. +class CGimbalInputBinder final : public IGimbalInputProcessor +{ +public: + using base_t = IGimbalInputProcessor; + using base_t::base_t; + using input_keyboard_event_t = base_t::input_keyboard_event_t; + using input_mouse_event_t = base_t::input_mouse_event_t; + using input_imguizmo_event_t = base_t::input_imguizmo_event_t; + + struct SCollectedVirtualEvents + { + /// @brief Concatenated output buffer plus per-domain counts for diagnostics. + std::vector events; + uint32_t keyboardCount = 0u; + uint32_t mouseCount = 0u; + uint32_t imguizmoCount = 0u; + + uint32_t totalCount() const; + }; + + /// @brief Translate one frame of external keyboard, mouse, and ImGuizmo input into virtual events. 
+ void clearActiveBindings(); + + void clearBindingLayout(); + + void copyActiveBindingsFromLayout(const IGimbalBindingLayout& layout); + + void copyBindingLayoutFrom(const IGimbalBindingLayout& layout); + + void copyActiveBindingsToLayout(IGimbalBindingLayout& layout) const; + + void copyBindingLayoutTo(IGimbalBindingLayout& layout) const; + + SCollectedVirtualEvents collectVirtualEvents( + const std::chrono::microseconds nextPresentationTimeStamp, + const SUpdateParameters parameters = {}); + +private: + template + inline static Map sanitizeMapping(const Map& source) + { + Map result; + for (const auto& [code, hash] : source) + result.emplace(code, typename Map::mapped_type(hash.event.type, hash.magnitudeScale)); + return result; + } +}; + +} // namespace nbl::ui + +#endif // _NBL_C_GIMBAL_INPUT_BINDER_HPP_ diff --git a/include/nbl/ext/Cameras/CIsometricCamera.hpp b/include/nbl/ext/Cameras/CIsometricCamera.hpp new file mode 100644 index 0000000000..673a9f37c8 --- /dev/null +++ b/include/nbl/ext/Cameras/CIsometricCamera.hpp @@ -0,0 +1,77 @@ +#ifndef _C_ISOMETRIC_CAMERA_HPP_ +#define _C_ISOMETRIC_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera locked to the shared isometric yaw and pitch. +/// +/// Translation moves the tracked target in the current view plane while the +/// authored isometric orientation stays fixed. Distance changes are still allowed. 
+class CIsometricCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CIsometricCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv = hlsl::float64_t2(IsoYaw, IsoPitch); + applyPose(); + } + ~CIsometricCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of planar target translation and distance changes while preserving the fixed isometric angles. + virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceIsometricState(reference, resolvedState)) + { + return false; + } + + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv = hlsl::float64_t2(IsoYaw, IsoPitch); + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + const auto basis = computeBasis(m_orbitUv, m_distance); + applyPlanarTargetTranslation(deltaTranslation, basis); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Isometric; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Isometric Camera"; } + +private: + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate; + static inline constexpr double IsoYaw = SCameraTargetRelativeRigDefaults::IsometricYawRad; + static inline const double IsoPitch = SCameraTargetRelativeRigDefaults::IsometricPitchRad; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CLinearProjection.hpp b/include/nbl/ext/Cameras/CLinearProjection.hpp new file mode 100644 index 0000000000..d6e0dd4e05 --- /dev/null +++ b/include/nbl/ext/Cameras/CLinearProjection.hpp @@ -0,0 +1,59 @@ +#ifndef _NBL_C_LINEAR_PROJECTION_HPP_ +#define _NBL_C_LINEAR_PROJECTION_HPP_ + +#include "ILinearProjection.hpp" +#include "IRange.hpp" + +namespace nbl::core +{ + /// @brief Range-backed concrete implementation of `ILinearProjection`. + /// + /// The template owns a caller-selected contiguous container of linear projection + /// entries and exposes it through the generic `ILinearProjection` interface. + template ProjectionsRange> + class CLinearProjection : public ILinearProjection + { + public: + using ILinearProjection::ILinearProjection; + + CLinearProjection() = default; + + /// @brief Create a projection wrapper only when a valid camera instance is available. + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CLinearProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + /// @brief Return the number of stored linear projection entries. + virtual uint32_t getLinearProjectionCount() const override + { + return static_cast(m_projections.size()); + } + + /// @brief Return one stored projection entry by index. 
+ virtual const CProjection& getLinearProjection(uint32_t index) const override + { + assert(index < m_projections.size()); + return m_projections[index]; + } + + /// @brief Expose mutable access to the owned projection range. + inline std::span getLinearProjections() + { + return std::span(m_projections.data(), m_projections.size()); + } + + private: + CLinearProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(core::smart_refctd_ptr(camera)) {} + virtual ~CLinearProjection() = default; + + ProjectionsRange m_projections; + }; + +} // nbl::hlsl namespace + +#endif // _NBL_C_LINEAR_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/COrbitCamera.hpp b/include/nbl/ext/Cameras/COrbitCamera.hpp new file mode 100644 index 0000000000..df411c4b81 --- /dev/null +++ b/include/nbl/ext/Cameras/COrbitCamera.hpp @@ -0,0 +1,83 @@ +#ifndef _C_ORBIT_CAMERA_HPP_ +#define _C_ORBIT_CAMERA_HPP_ + +#include +#include +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera with state `(target, orbitUv, distance)`. +/// +/// Runtime input updates only orbit yaw, orbit pitch, and camera distance. +/// The target position remains unchanged during `manipulate(...)`. +class COrbitCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + COrbitCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_distance = std::clamp(hlsl::length(m_targetPosition - position), MinDistance, MaxDistance); + applyPose(); + } + ~COrbitCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of orbit-angle and distance input around the current target. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (virtualEvents.empty() && !referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv += hlsl::float64_t2(deltaTranslation.y, deltaTranslation.x); + + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override + { + return AllowedVirtualEvents; + } + + virtual CameraKind getKind() const override + { + return CameraKind::Orbit; + } + + virtual std::string_view getIdentifier() const override + { + return "Orbit Camera"; + } + + static inline constexpr float MinDistance = base_t::MinDistance; + static inline constexpr float MaxDistance = base_t::MaxDistance; + + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate; +}; + +} + +#endif // _C_ORBIT_CAMERA_HPP_ diff --git a/include/nbl/ext/Cameras/CPathCamera.hpp b/include/nbl/ext/Cameras/CPathCamera.hpp new file mode 100644 index 0000000000..aa8b335584 --- /dev/null +++ b/include/nbl/ext/Cameras/CPathCamera.hpp @@ -0,0 +1,342 @@ +#ifndef _C_PATH_CAMERA_HPP_ +#define _C_PATH_CAMERA_HPP_ + +#include +#include + +#include "CCameraPathUtilities.hpp" +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Path-rig camera driven by typed `PathState` plus an injected path model. 
+/// +/// The public runtime path stays event-only through `manipulate(...)`. +/// `CPathCamera` only interprets the accumulated impulse through `m_pathModel` +/// instead of hardcoding one default target-relative mapping in the method body. +class CPathCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + using path_model_t = SCameraPathModel; + using path_limits_t = PathStateLimits; + + /// @brief Construct the path rig with the shared default path model and default limits. + CPathCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : CPathCamera(position, target, CCameraPathUtilities::makeDefaultPathModel(), CCameraPathUtilities::makeDefaultPathLimits()) + { + } + + /// @brief Construct the path rig with a caller-provided model and default limits. + CPathCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target, path_model_t pathModel) + : CPathCamera(position, target, std::move(pathModel), CCameraPathUtilities::makeDefaultPathLimits()) + { + } + + /// @brief Construct the path rig with the shared default model and caller-provided limits. + CPathCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target, path_limits_t pathLimits) + : CPathCamera(position, target, CCameraPathUtilities::makeDefaultPathModel(), pathLimits) + { + } + + /// @brief Construct the path rig with fully caller-provided model and path-state limits. + CPathCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target, path_model_t pathModel, path_limits_t pathLimits) + : base_t(position, target) + { + initializePathRig(position, std::move(pathModel), pathLimits); + } + + ~CPathCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Consume virtual events through the active path model and update the runtime pose from the resulting path state. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (virtualEvents.empty() && !referenceFrame) + return false; + + PathState nextPathState = m_pathState; + CReferenceTransform reference = {}; + const CReferenceTransform* resolvedReference = nullptr; + if (referenceFrame) + { + if (!m_gimbal.extractReferenceTransform(&reference, referenceFrame)) + return false; + resolvedReference = &reference; + if (!m_pathModel.resolveState || + !m_pathModel.resolveState( + m_targetPosition, + hlsl::float64_t3(reference.frame[3]), + m_pathLimits, + nullptr, + nextPathState)) + { + return false; + } + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + const SCameraPathControlContext context = { + .currentState = nextPathState, + .translation = scaleVirtualTranslation(impulse.dVirtualTranslate), + .rotation = scaleVirtualRotation(impulse.dVirtualRotation), + .targetPosition = m_targetPosition, + .reference = resolvedReference, + .limits = m_pathLimits + }; + + if (!m_pathModel.controlLaw || !m_pathModel.integrate) + return false; + + const auto stateDelta = m_pathModel.controlLaw(context); + if (!m_pathModel.integrate(nextPathState, stateDelta, m_pathLimits, nextPathState)) + return false; + + const auto previousPathState = m_pathState; + m_pathState = nextPathState; + bool manipulated = false; + if (refreshFromPathState(&manipulated)) + return manipulated; + + m_pathState = previousPathState; + refreshFromPathState(); + return false; + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Path; } + virtual uint32_t getGoalStateMask() const override { return base_t::getGoalStateMask() | base_t::GoalStatePath; } + + /// @brief Query the current typed path state. 
+ virtual bool tryGetPathState(PathState& out) const override + { + out = m_pathState; + return true; + } + + /// @brief Query the active path-state limits used by this camera instance. + virtual bool tryGetPathStateLimits(PathStateLimits& out) const override + { + out = m_pathLimits; + return true; + } + + /// @brief Query the derived spherical-target state corresponding to the current path-state evaluation. + virtual bool tryGetSphericalTargetState(typename base_t::SphericalTargetState& out) const override + { + out.target = m_targetPosition; + out.distance = m_distance; + out.orbitUv = m_orbitUv; + out.minDistance = static_cast(m_pathLimits.minDistance); + out.maxDistance = static_cast(m_pathLimits.maxDistance); + return true; + } + + /// @brief Replace only the tracked target position and rebuild the current path pose against it. + virtual bool trySetSphericalTarget(const hlsl::float64_t3& targetPosition) override + { + if (m_targetPosition == targetPosition) + return true; + + const auto previousTarget = m_targetPosition; + m_targetPosition = targetPosition; + if (refreshFromPathState()) + return true; + + m_targetPosition = previousTarget; + refreshFromPathState(); + return false; + } + + /// @brief Replace the current path state after sanitizing it through the active path model. 
+ virtual bool trySetPathState(const PathState& state) override + { + if (!m_pathModel.resolveState) + return false; + + PathState sanitized = {}; + if (!m_pathModel.resolveState(m_targetPosition, m_gimbal.getPosition(), m_pathLimits, &state, sanitized)) + return false; + + const bool exact = CCameraPathUtilities::pathStatesNearlyEqual(sanitized, state, SCameraPathDefaults::ExactComparisonThresholds); + const auto previousState = m_pathState; + m_pathState = sanitized; + if (refreshFromPathState()) + return exact; + + m_pathState = previousState; + refreshFromPathState(); + return false; + } + + /// @brief Replace the derived path distance while preserving the rest of the typed path state. + virtual bool trySetSphericalDistance(float distance) override + { + SCameraPathDistanceUpdateResult distanceUpdate = {}; + if (!m_pathModel.updateDistance) + { + return false; + } + + const auto previousState = m_pathState; + if (!m_pathModel.updateDistance(distance, m_pathLimits, m_pathState, &distanceUpdate)) + return false; + if (!refreshFromPathState()) + { + m_pathState = previousState; + refreshFromPathState(); + return false; + } + + return distanceUpdate.exact; + } + + virtual std::string_view getIdentifier() const override { return SCameraPathDefaults::Identifier; } + + /// @brief Return the currently installed path model. + inline const path_model_t& getPathModel() const + { + return m_pathModel; + } + + /// @brief Return the current path-state limits enforced by this camera instance. + inline const path_limits_t& getPathStateLimits() const + { + return m_pathLimits; + } + + /// @brief Replace the active path-state limits after sanitizing the current path state against them. 
inline bool setPathStateLimits(path_limits_t pathLimits)
{
	// Reject malformed limits, and models that cannot re-sanitize the current state.
	if (!CCameraPathUtilities::sanitizePathLimits(pathLimits) || !m_pathModel.resolveState)
		return false;

	PathState sanitizedState = {};
	if (!m_pathModel.resolveState(m_targetPosition, m_gimbal.getPosition(), pathLimits, &m_pathState, sanitizedState))
		return false;

	const auto previousLimits = m_pathLimits;
	const auto previousState = m_pathState;
	m_pathLimits = pathLimits;
	m_pathState = sanitizedState;
	if (refreshFromPathState())
		return true;

	// Roll back both limits and state when the sanitized pair fails to evaluate.
	m_pathLimits = previousLimits;
	m_pathState = previousState;
	refreshFromPathState();
	return false;
}

/// @brief Replace the active path model after validating that it can resolve the current path state.
inline bool setPathModel(path_model_t pathModel)
{
	if (!isPathModelComplete(pathModel))
		return false;

	PathState sanitized = {};
	if (!pathModel.resolveState(m_targetPosition, m_gimbal.getPosition(), m_pathLimits, &m_pathState, sanitized))
		return false;

	const auto previousModel = m_pathModel;
	const auto previousState = m_pathState;
	m_pathModel = std::move(pathModel);
	m_pathState = sanitized;
	if (refreshFromPathState())
		return true;

	// Roll back model and state when the new model cannot evaluate the sanitized state.
	m_pathModel = previousModel;
	m_pathState = previousState;
	refreshFromPathState();
	return false;
}

private:
// Event families this camera consumes; rotation is intentionally limited to roll.
static inline constexpr auto AllowedVirtualEvents =
	CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::RollLeft | CVirtualGimbalEvent::RollRight;

/// @brief Check whether a path model provides all callbacks required by the runtime camera.
static inline bool isPathModelComplete(const path_model_t& pathModel)
{
	return pathModel.resolveState && pathModel.controlLaw && pathModel.integrate && pathModel.evaluate && pathModel.updateDistance;
}

/// @brief Attempt to initialize the runtime path state and pose from one model/limit pair.
inline bool tryInitializePathRig(const hlsl::float64_t3& position, path_model_t pathModel, path_limits_t pathLimits)
{
	if (!CCameraPathUtilities::sanitizePathLimits(pathLimits))
		return false;

	if (!isPathModelComplete(pathModel))
		return false;

	PathState resolvedState = {};
	// nullptr previous state: resolve purely from the target/position pair.
	if (!pathModel.resolveState(m_targetPosition, position, pathLimits, nullptr, resolvedState))
		return false;

	// Commit model, limits, and state only once resolution succeeded.
	m_pathLimits = pathLimits;
	m_pathModel = std::move(pathModel);
	m_pathState = resolvedState;
	return refreshFromPathState();
}

/// @brief Initialize the path rig with graceful fallback to the shared default model and limits.
inline void initializePathRig(const hlsl::float64_t3& position, path_model_t pathModel, path_limits_t pathLimits)
{
	path_limits_t sanitizedLimits = pathLimits;
	const bool hasCustomLimits = CCameraPathUtilities::sanitizePathLimits(sanitizedLimits);
	if (!hasCustomLimits)
		sanitizedLimits = CCameraPathUtilities::makeDefaultPathLimits();

	// First try the caller-provided model, then the default model under the same limits.
	if (tryInitializePathRig(position, std::move(pathModel), sanitizedLimits))
		return;

	if (tryInitializePathRig(position, CCameraPathUtilities::makeDefaultPathModel(), sanitizedLimits))
		return;

	// Last resort: adopt defaults unconditionally. The resolveState result is
	// deliberately ignored here so construction always leaves a usable rig.
	m_pathLimits = CCameraPathUtilities::makeDefaultPathLimits();
	m_pathModel = CCameraPathUtilities::makeDefaultPathModel();
	m_pathState = CCameraPathUtilities::makeDefaultPathState(m_pathLimits.minU);
	m_pathModel.resolveState(m_targetPosition, position, m_pathLimits, nullptr, m_pathState);
	refreshFromPathState();
}

// Runtime path model, limits, and state; defaults come from the shared utilities.
path_model_t m_pathModel = CCameraPathUtilities::makeDefaultPathModel();
path_limits_t m_pathLimits = CCameraPathUtilities::makeDefaultPathLimits();
PathState m_pathState = CCameraPathUtilities::makeDefaultPathState(CCameraPathUtilities::makeDefaultPathLimits().minU);

/// @brief Evaluate the current path state into a canonical pose and write it back to the runtime gimbal.
+ bool refreshFromPathState(bool* outManipulated = nullptr) + { + if (!m_pathModel.evaluate) + return false; + + SCameraCanonicalPathState canonicalPathState = {}; + if (!m_pathModel.evaluate(m_targetPosition, m_pathState, m_pathLimits, canonicalPathState)) + return false; + + m_distance = canonicalPathState.targetRelative.distance; + m_orbitUv = canonicalPathState.targetRelative.orbitUv; + + m_gimbal.begin(); + { + m_gimbal.setPosition(canonicalPathState.pose.position); + m_gimbal.setOrientation(canonicalPathState.pose.orientation); + } + m_gimbal.end(); + + const bool manipulated = bool(m_gimbal.getManipulationCounter()); + if (manipulated) + m_gimbal.updateView(); + + if (outManipulated) + *outManipulated = manipulated; + return true; + } +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CPlanarProjection.hpp b/include/nbl/ext/Cameras/CPlanarProjection.hpp new file mode 100644 index 0000000000..5407b9dd4e --- /dev/null +++ b/include/nbl/ext/Cameras/CPlanarProjection.hpp @@ -0,0 +1,56 @@ +#ifndef _NBL_C_PLANAR_PROJECTION_HPP_ +#define _NBL_C_PLANAR_PROJECTION_HPP_ + +#include "IPlanarProjection.hpp" +#include "IRange.hpp" + +namespace nbl::core +{ + /// @brief Range-backed concrete implementation of `IPlanarProjection`. + /// + /// The template owns a caller-selected contiguous container of planar + /// projection entries together with their viewport-local binding layouts. + template ProjectionsRange> + class CPlanarProjection : public IPlanarProjection + { + public: + virtual ~CPlanarProjection() = default; + + /// @brief Create a planar projection wrapper only when a valid camera instance is available. + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CPlanarProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + /// @brief Return the number of stored planar projection entries. 
/// @brief Return the number of stored planar projection entries.
virtual uint32_t getLinearProjectionCount() const override
{
	// NOTE(review): static_cast template argument appears stripped by extraction — presumably static_cast<uint32_t>; confirm against the repository.
	return static_cast(m_projections.size());
}

/// @brief Return one stored planar projection entry through the linear base interface.
/// @note Out-of-range access is a programmer error, guarded by assert only.
virtual const ILinearProjection::CProjection& getLinearProjection(uint32_t index) const override
{
	assert(index < m_projections.size());
	return m_projections[index];
}

/// @brief Expose mutable access to the owned planar projection range.
inline ProjectionsRange& getPlanarProjections()
{
	return m_projections;
}

protected:
/// @brief Private constructor: instances are created through create() only.
CPlanarProjection(core::smart_refctd_ptr&& camera)
	: IPlanarProjection(core::smart_refctd_ptr(camera)) {}

// Caller-selected contiguous container of planar projection entries.
ProjectionsRange m_projections;
};

} // namespace nbl::core

#endif // _NBL_C_PLANAR_PROJECTION_HPP_
diff --git a/include/nbl/ext/Cameras/CSphericalTargetCamera.hpp b/include/nbl/ext/Cameras/CSphericalTargetCamera.hpp
new file mode 100644
index 0000000000..54fd43a2c4
--- /dev/null
+++ b/include/nbl/ext/Cameras/CSphericalTargetCamera.hpp
@@ -0,0 +1,243 @@
#ifndef _C_SPHERICAL_TARGET_CAMERA_HPP_
#define _C_SPHERICAL_TARGET_CAMERA_HPP_

// NOTE(review): the angle-bracket include target below appears stripped by extraction; confirm against the repository.
#include
#include "CCameraTargetRelativeUtilities.hpp"

namespace nbl::core
{

/// @brief Common base for target-relative cameras represented by target position, distance, and `orbitUv`.
///
/// Derived cameras keep the same target-relative storage but apply different
/// constraints and event policies in `manipulate(...)`.
+class CSphericalTargetCamera : public ICamera +{ +public: + using base_t = ICamera; + + CSphericalTargetCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(), m_targetPosition(target), m_distance(SCameraTargetRelativeRigDefaults::InitialDistance), + m_gimbal(typename base_t::CGimbal::base_t::SCreationParameters{ + .position = position, + .orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion() + }) + { + initFromPosition(position); + } + ~CSphericalTargetCamera() = default; + + inline bool setDistance(float d) + { + const auto clamped = std::clamp(d, MinDistance, MaxDistance); + const bool ok = clamped == d; + if (m_distance == clamped) + return ok; + m_distance = clamped; + applyPose(); + return ok; + } + + inline void target(const hlsl::float64_t3& p) + { + if (m_targetPosition == p) + return; + m_targetPosition = p; + applyPose(); + } + inline hlsl::float64_t3 getTarget() const { return m_targetPosition; } + + inline float getDistance() const { return m_distance; } + inline const hlsl::float64_t2& getOrbitUv() const { return m_orbitUv; } + + static inline constexpr float MinDistance = SCameraTargetRelativeTraits::MinDistance; + static inline constexpr float MaxDistance = SCameraTargetRelativeTraits::DefaultMaxDistance; + + virtual uint32_t getCapabilities() const override + { + return base_t::SphericalTarget; + } + + virtual bool tryGetSphericalTargetState(typename base_t::SphericalTargetState& out) const override + { + out.target = m_targetPosition; + out.distance = m_distance; + out.orbitUv = m_orbitUv; + out.minDistance = MinDistance; + out.maxDistance = MaxDistance; + return true; + } + + virtual bool trySetSphericalTarget(const hlsl::float64_t3& targetPosition) override + { + target(targetPosition); + return true; + } + + virtual bool trySetSphericalDistance(float distance) override + { + return setDistance(distance); + } + +protected: + using SphericalBasis = SCameraTargetRelativeBasis; + + /// @brief Return the 
current canonical target-relative state stored by the spherical rig. + inline SCameraTargetRelativeState currentTargetRelativeState() const + { + return { + .target = m_targetPosition, + .orbitUv = m_orbitUv, + .distance = m_distance + }; + } + + /// @brief Replace the stored target-relative state without touching the gimbal pose yet. + inline void adoptTargetRelativeState(const SCameraTargetRelativeState& state) + { + m_targetPosition = state.target; + m_orbitUv = state.orbitUv; + m_distance = state.distance; + } + + /// @brief Extract one rigid reference transform from the optional external override or the current gimbal pose. + inline bool tryExtractReferenceTransform(CReferenceTransform& outReference, const hlsl::float64_t4x4* referenceFrame) + { + return m_gimbal.extractReferenceTransform(&outReference, referenceFrame); + } + + /// @brief Resolve the current target-relative state from one rigid reference position around the current target. + inline bool tryResolveReferenceTargetRelativeState(const CReferenceTransform& reference, SCameraTargetRelativeState& outState) const + { + return CCameraTargetRelativeUtilities::tryBuildTargetRelativeStateFromPosition( + m_targetPosition, + hlsl::float64_t3(reference.frame[3]), + MinDistance, + MaxDistance, + outState); + } + + /// @brief Resolve the top-down yaw encoded by a rigid reference orientation. 
static inline double resolveTopDownYawFromReference(const CReferenceTransform& reference, const double fallbackYaw)
{
	const auto basis = hlsl::CCameraMathUtilities::getQuaternionBasisMatrix(reference.orientation);
	// Prefer the up axis projected onto the ground (XY) plane to recover yaw.
	const auto planarUp = hlsl::float64_t2(basis[1].x, basis[1].y);
	// NOTE(review): static_cast template argument appears stripped by extraction — presumably static_cast<double>; confirm against the repository.
	constexpr auto Epsilon = static_cast(SCameraToolingThresholds::TinyScalarEpsilon);
	if (!hlsl::CCameraMathUtilities::isNearlyZeroVector(planarUp, Epsilon))
		return hlsl::atan2(planarUp.y, planarUp.x);

	// Degenerate planar up: fall back to the right axis, rotated a quarter turn
	// so both branches report the same yaw convention.
	const auto planarRight = hlsl::float64_t2(basis[0].x, basis[0].y);
	if (!hlsl::CCameraMathUtilities::isNearlyZeroVector(planarRight, Epsilon))
		return hlsl::atan2(planarRight.x, -planarRight.y);

	// Both projections degenerate: keep the caller-provided yaw.
	return fallbackYaw;
}

/// @brief Project one rigid reference pose onto the legal top-down state manifold around the current target.
virtual bool tryResolveReferenceTopDownState(const CReferenceTransform& reference, SCameraTargetRelativeState& outState) const
{
	// Reject references that coincide with (or are non-finite relative to) the target.
	const auto offset = hlsl::float64_t3(reference.frame[3]) - m_targetPosition;
	const auto distance = hlsl::length(offset);
	if (!hlsl::CCameraMathUtilities::isFiniteScalar(distance) ||
		distance <= static_cast(SCameraToolingThresholds::TinyScalarEpsilon))
	{
		return false;
	}

	// Keep the current state, then overwrite distance (clamped) and orbit angles;
	// pitch is pinned to the top-down angle regardless of the reference.
	outState = currentTargetRelativeState();
	outState.distance = static_cast(std::clamp(
		distance,
		static_cast(MinDistance),
		static_cast(MaxDistance)));
	outState.orbitUv.x = resolveTopDownYawFromReference(reference, m_orbitUv.x);
	outState.orbitUv.y = SCameraTargetRelativeRigDefaults::TopDownPitchRad;
	return true;
}

/// @brief Project one rigid reference pose onto the legal fixed-angle isometric manifold around the current target.
+ inline bool tryResolveReferenceIsometricState(const CReferenceTransform& reference, SCameraTargetRelativeState& outState) const + { + if (!tryResolveReferenceTargetRelativeState(reference, outState)) + return false; + + outState.orbitUv = hlsl::float64_t2( + SCameraTargetRelativeRigDefaults::IsometricYawRad, + SCameraTargetRelativeRigDefaults::IsometricPitchRad); + return true; + } + + inline SphericalBasis computeBasis(const hlsl::float64_t2& orbitUv, float distance) const + { + SphericalBasis basis; + const SCameraTargetRelativeState state = { + .target = m_targetPosition, + .orbitUv = orbitUv, + .distance = distance + }; + if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativeBasis(state, MinDistance, MaxDistance, basis)) + return basis; + return basis; + } + + inline void initFromPosition(const hlsl::float64_t3& position) + { + SCameraTargetRelativeState state = {}; + if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativeStateFromPosition(m_targetPosition, position, MinDistance, MaxDistance, state)) + { + m_distance = MinDistance; + m_orbitUv = hlsl::float64_t2(0.0); + return; + } + + m_distance = state.distance; + m_orbitUv = state.orbitUv; + } + + inline void applyPlanarTargetTranslation(const hlsl::float64_t3& deltaTranslation, const SphericalBasis& basis) + { + if (!hlsl::CCameraMathUtilities::hasPlanarDeltaXY(deltaTranslation, static_cast(SCameraToolingThresholds::TinyScalarEpsilon))) + return; + + m_targetPosition += hlsl::CCameraMathUtilities::transformLocalVectorToWorldBasis( + hlsl::float64_t3(deltaTranslation.x, deltaTranslation.y, 0.0), + basis.right, + basis.up, + basis.forward); + } + + inline bool applyPose() + { + const SCameraTargetRelativeState state = { + .target = m_targetPosition, + .orbitUv = m_orbitUv, + .distance = m_distance + }; + SCameraTargetRelativePose pose = {}; + if (!CCameraTargetRelativeUtilities::tryBuildTargetRelativePoseFromState(state, MinDistance, MaxDistance, pose)) + return false; + m_distance = 
static_cast(pose.appliedDistance); + + m_gimbal.begin(); + { + m_gimbal.setPosition(pose.position); + m_gimbal.setOrientation(pose.orientation); + } + m_gimbal.end(); + + const bool manipulated = bool(m_gimbal.getManipulationCounter()); + if (manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + hlsl::float64_t3 m_targetPosition; + float m_distance; + typename base_t::CGimbal m_gimbal; + hlsl::float64_t2 m_orbitUv = hlsl::float64_t2(0.0); +}; + +} + +#endif + diff --git a/include/nbl/ext/Cameras/CTopDownCamera.hpp b/include/nbl/ext/Cameras/CTopDownCamera.hpp new file mode 100644 index 0000000000..63d16872c3 --- /dev/null +++ b/include/nbl/ext/Cameras/CTopDownCamera.hpp @@ -0,0 +1,78 @@ +#ifndef _C_TOPDOWN_CAMERA_HPP_ +#define _C_TOPDOWN_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera constrained to look straight down at the tracked target. +/// +/// Yaw may still rotate the view around the vertical axis, while pitch is fixed to +/// the top-down angle and translation moves the tracked target in the view plane. +class CTopDownCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CTopDownCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv.y = TopDownPitch; + applyPose(); + } + ~CTopDownCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of top-down yaw rotation, planar translation, and distance changes. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTopDownState(reference, resolvedState)) + { + return false; + } + + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const auto deltaRotation = scaleVirtualRotation(impulse.dVirtualRotation); + const auto deltaTranslation = scaleVirtualTranslation(impulse.dVirtualTranslate); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv.x += deltaRotation.y; + m_orbitUv.y = TopDownPitch; + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + const auto basis = computeBasis(m_orbitUv, m_distance); + applyPlanarTargetTranslation(deltaTranslation, basis); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::TopDown; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Top-Down Camera"; } + +private: + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr double TopDownPitch = SCameraTargetRelativeRigDefaults::TopDownPitchRad; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CTurntableCamera.hpp b/include/nbl/ext/Cameras/CTurntableCamera.hpp new file mode 100644 index 0000000000..dc44d23a3c --- /dev/null +++ b/include/nbl/ext/Cameras/CTurntableCamera.hpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2024 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_TURNTABLE_CAMERA_HPP_ +#define _C_TURNTABLE_CAMERA_HPP_ + +#include +#include + +#include "CSphericalTargetCamera.hpp" + +namespace nbl::core +{ + +/// @brief Target-relative camera that behaves like a classic turntable around a fixed target. +/// +/// The camera exposes yaw, bounded pitch, and distance changes while keeping the +/// target fixed in space and avoiding arbitrary planar target translation. +class CTurntableCamera final : public CSphericalTargetCamera +{ +public: + using base_t = CSphericalTargetCamera; + + CTurntableCamera(const hlsl::float64_t3& position, const hlsl::float64_t3& target) + : base_t(position, target) + { + m_orbitUv.y = std::clamp(m_orbitUv.y, MinPitch, MaxPitch); + applyPose(); + } + ~CTurntableCamera() = default; + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + /// @brief Apply one frame of yaw, bounded pitch, and distance input around the tracked target. 
+ virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) override + { + if (not virtualEvents.size() and not referenceFrame) + return false; + + if (referenceFrame) + { + CReferenceTransform reference = {}; + SCameraTargetRelativeState resolvedState = {}; + if (!tryExtractReferenceTransform(reference, referenceFrame) || + !tryResolveReferenceTargetRelativeState(reference, resolvedState)) + { + return false; + } + + resolvedState.orbitUv.y = std::clamp(resolvedState.orbitUv.y, MinPitch, MaxPitch); + adoptTargetRelativeState(resolvedState); + } + + const auto impulse = m_gimbal.accumulate(virtualEvents); + + const double deltaYaw = scaleVirtualRotation(impulse.dVirtualRotation.y); + const double deltaPitch = scaleVirtualRotation(impulse.dVirtualRotation.x); + const double deltaDistance = scaleUnscaledVirtualTranslation(impulse.dVirtualTranslate.z); + + m_orbitUv.x += deltaYaw; + m_orbitUv.y = std::clamp(m_orbitUv.y + deltaPitch, MinPitch, MaxPitch); + m_distance = std::clamp(m_distance + static_cast(deltaDistance), MinDistance, MaxDistance); + + return applyPose(); + } + + virtual uint32_t getAllowedVirtualEvents() const override { return AllowedVirtualEvents; } + virtual CameraKind getKind() const override { return CameraKind::Turntable; } + /// @brief Return the stable user-facing identifier for this concrete camera kind. 
+ virtual std::string_view getIdentifier() const override { return "Turntable Camera"; } + + static inline constexpr float MinDistance = base_t::MinDistance; + static inline constexpr float MaxDistance = base_t::MaxDistance; + +private: + + static inline constexpr auto AllowedVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr double MaxPitch = SCameraTargetRelativeRigDefaults::TurntablePitchLimitRad; + static inline constexpr double MinPitch = -MaxPitch; +}; + +} + +#endif diff --git a/include/nbl/ext/Cameras/CVirtualGimbalEvent.hpp b/include/nbl/ext/Cameras/CVirtualGimbalEvent.hpp new file mode 100644 index 0000000000..e1384ed426 --- /dev/null +++ b/include/nbl/ext/Cameras/CVirtualGimbalEvent.hpp @@ -0,0 +1,159 @@ +#ifndef _NBL_C_VIRTUAL_GIMBAL_EVENT_HPP_ +#define _NBL_C_VIRTUAL_GIMBAL_EVENT_HPP_ + +#include +#include +#include + +#include "nbl/builtin/hlsl/cpp_compat/vector.hlsl" +#include "nbl/core/math/intutil.h" + +namespace nbl::core +{ + +/// @brief One semantic camera command passed to `ICamera::manipulate(...)`. +/// +/// `type` selects the command family. `magnitude` stores the non-negative +/// source-normalized scalar amount for that command. Input binders convert +/// raw keyboard, mouse, scroll, and ImGuizmo data into this representation +/// before the camera sees it. Cameras then convert these virtual magnitudes +/// into camera-local motion through their runtime scales and family-specific +/// legalization rules. +struct CVirtualGimbalEvent +{ + /// @brief Bitmask identifiers for semantic movement, rotation, and scale commands. 
+ enum VirtualEventType : uint32_t + { + None = 0, + + MoveForward = core::createBitmask({ 0 }), + MoveBackward = core::createBitmask({ 1 }), + MoveLeft = core::createBitmask({ 2 }), + MoveRight = core::createBitmask({ 3 }), + MoveUp = core::createBitmask({ 4 }), + MoveDown = core::createBitmask({ 5 }), + TiltUp = core::createBitmask({ 6 }), + TiltDown = core::createBitmask({ 7 }), + PanLeft = core::createBitmask({ 8 }), + PanRight = core::createBitmask({ 9 }), + RollLeft = core::createBitmask({ 10 }), + RollRight = core::createBitmask({ 11 }), + ScaleXInc = core::createBitmask({ 12 }), + ScaleXDec = core::createBitmask({ 13 }), + ScaleYInc = core::createBitmask({ 14 }), + ScaleYDec = core::createBitmask({ 15 }), + ScaleZInc = core::createBitmask({ 16 }), + ScaleZDec = core::createBitmask({ 17 }), + + EventsCount = 18, + + Translate = MoveForward | MoveBackward | MoveLeft | MoveRight | MoveUp | MoveDown, + Rotate = TiltUp | TiltDown | PanLeft | PanRight | RollLeft | RollRight, + Scale = ScaleXInc | ScaleXDec | ScaleYInc | ScaleYDec | ScaleZInc | ScaleZDec, + + All = Translate | Rotate | Scale + }; + + /// @brief Scalar type used to encode one event magnitude. + using manipulation_encode_t = hlsl::float64_t; + + /// @brief Semantic event identifier. + VirtualEventType type = None; + /// @brief Non-negative scalar amount associated with `type`. + /// + /// The value is not a raw device unit. It is the virtual amount emitted by + /// the active input path after applying binding-local gains. + manipulation_encode_t magnitude = {}; + + /// @brief Convert one event identifier to its stable string form. 
+ static constexpr std::string_view virtualEventToString(VirtualEventType event) + { + switch (event) + { + case MoveForward: return "MoveForward"; + case MoveBackward: return "MoveBackward"; + case MoveLeft: return "MoveLeft"; + case MoveRight: return "MoveRight"; + case MoveUp: return "MoveUp"; + case MoveDown: return "MoveDown"; + case TiltUp: return "TiltUp"; + case TiltDown: return "TiltDown"; + case PanLeft: return "PanLeft"; + case PanRight: return "PanRight"; + case RollLeft: return "RollLeft"; + case RollRight: return "RollRight"; + case ScaleXInc: return "ScaleXInc"; + case ScaleXDec: return "ScaleXDec"; + case ScaleYInc: return "ScaleYInc"; + case ScaleYDec: return "ScaleYDec"; + case ScaleZInc: return "ScaleZInc"; + case ScaleZDec: return "ScaleZDec"; + case Translate: return "Translate"; + case Rotate: return "Rotate"; + case Scale: return "Scale"; + case None: return "None"; + default: return "Unknown"; + } + } + + /// @brief Convert one stable string identifier back to an event identifier. 
static constexpr VirtualEventType stringToVirtualEvent(std::string_view event)
{
	if (event == "MoveForward") return MoveForward;
	if (event == "MoveBackward") return MoveBackward;
	if (event == "MoveLeft") return MoveLeft;
	if (event == "MoveRight") return MoveRight;
	if (event == "MoveUp") return MoveUp;
	if (event == "MoveDown") return MoveDown;
	if (event == "TiltUp") return TiltUp;
	if (event == "TiltDown") return TiltDown;
	if (event == "PanLeft") return PanLeft;
	if (event == "PanRight") return PanRight;
	if (event == "RollLeft") return RollLeft;
	if (event == "RollRight") return RollRight;
	if (event == "ScaleXInc") return ScaleXInc;
	if (event == "ScaleXDec") return ScaleXDec;
	if (event == "ScaleYInc") return ScaleYInc;
	if (event == "ScaleYDec") return ScaleYDec;
	if (event == "ScaleZInc") return ScaleZInc;
	if (event == "ScaleZDec") return ScaleZDec;
	if (event == "Translate") return Translate;
	if (event == "Rotate") return Rotate;
	if (event == "Scale") return Scale;
	if (event == "None") return None;
	// Unrecognized identifiers deliberately map to None.
	return None;
}

/// @brief Return whether `event` belongs to the translation subset.
/// True only when `event` is non-empty and every set bit lies inside the Translate mask.
static constexpr bool isTranslationEvent(const VirtualEventType event)
{
	return event != None && (event & Translate) == event;
}

/// @brief Return whether `event` belongs to the rotation subset.
/// True only when `event` is non-empty and every set bit lies inside the Rotate mask.
static constexpr bool isRotationEvent(const VirtualEventType event)
{
	return event != None && (event & Rotate) == event;
}

/// @brief Return whether `event` belongs to the scale subset.
/// True only when `event` is non-empty and every set bit lies inside the Scale mask.
static constexpr bool isScaleEvent(const VirtualEventType event)
{
	return event != None && (event & Scale) == event;
}

/// @brief Table listing every individual event bit in declaration order.
+ static inline constexpr auto VirtualEventsTypeTable = []() + { + std::array output; + + for (uint16_t i = 0u; i < EventsCount; ++i) + output[i] = static_cast(core::createBitmask({ i })); + + return output; + }(); +}; + +} // namespace nbl::core + +#endif // _NBL_C_VIRTUAL_GIMBAL_EVENT_HPP_ diff --git a/include/nbl/ext/Cameras/ICamera.hpp b/include/nbl/ext/Cameras/ICamera.hpp new file mode 100644 index 0000000000..d987e74afa --- /dev/null +++ b/include/nbl/ext/Cameras/ICamera.hpp @@ -0,0 +1,509 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _I_CAMERA_HPP_ +#define _I_CAMERA_HPP_ + +#include +#include + +#include "nbl/core/IReferenceCounted.h" +#include "nbl/core/util/bitflag.h" +#include "CCameraTraits.hpp" +#include "IGimbal.hpp" + +namespace nbl::core +{ + +/// @brief Shared runtime camera interface. +/// +/// `ICamera` consumes batches of `CVirtualGimbalEvent` values and updates one +/// camera pose stored in `CGimbal`. A `CVirtualGimbalEvent` identifies one +/// semantic command such as `MoveForward`, `PanLeft`, or `RollRight` and carries +/// one source-normalized scalar magnitude for that command. +/// +/// Keyboard input, mouse input, ImGuizmo interaction, scripted playback, +/// preset replay, follow helpers, and goal solving all drive cameras through +/// the same `manipulate(...)` entry point. +/// +/// The optional typed hooks expose camera-family state for code that needs +/// capture, restore, compatibility analysis, persistence, or validation. 
+class ICamera : virtual public core::IReferenceCounted +{ +private: + static inline constexpr double DefaultMoveSpeedScaleValue = 0.01; + static inline constexpr double DefaultRotationSpeedScaleValue = 0.003; + static inline constexpr double VirtualTranslationUnit = 0.01; + +public: + /// @brief Camera-local multipliers applied when semantic virtual events are converted into motion. + /// + /// Input binders emit virtual magnitudes. Concrete cameras multiply those + /// magnitudes by this per-camera configuration before applying them to + /// their own state model. + struct SMotionConfig + { + /// @brief Camera-local scale applied to virtual translation magnitudes. + double moveSpeedScale = DefaultMoveSpeedScaleValue; + /// @brief Camera-local scale applied to virtual rotation magnitudes. + double rotationSpeedScale = DefaultRotationSpeedScaleValue; + }; + + /// @brief Stable camera-family identifier used by metadata, presets, follow, and scripted helpers. + enum class CameraKind : uint8_t + { + Unknown, + FPS, + Free, + Orbit, + Arcball, + Turntable, + TopDown, + Isometric, + Chase, + Dolly, + DollyZoom, + Path + }; + + /// @brief Optional typed capabilities exposed by a concrete runtime camera implementation. + enum CameraCapability : uint32_t + { + None = 0u, + SphericalTarget = core::createBitmask({ 0 }), + DynamicPerspectiveFov = core::createBitmask({ 1 }) + }; + + /// @brief Typed state fragments that helper layers may capture from or apply to a camera. + enum GoalStateMask : uint32_t + { + GoalStateNone = 0u, + GoalStateSphericalTarget = core::createBitmask({ 0 }), + GoalStateDynamicPerspective = core::createBitmask({ 1 }), + GoalStatePath = core::createBitmask({ 2 }) + }; + + using capability_flags_t = core::bitflag; + using goal_state_flags_t = core::bitflag; + + /// @brief Canonical target-relative state reported by spherical camera families. 
+ /// + /// The state stores the tracked target position, orbit angles in `orbitUv`, + /// and distance limits needed by tooling that wants to capture or reapply a + /// target-relative camera pose without going through free-form setters. + /// `maxDistance` is an optional upper bound and may be infinite when the + /// active camera family does not impose a finite cap. + struct SphericalTargetState + { + /// @brief Tracked target position in world space. + hlsl::float64_t3 target = hlsl::float64_t3(0.0); + /// @brief Orbit yaw and pitch around the target, expressed in radians. + hlsl::float64_t2 orbitUv = hlsl::float64_t2(0.0); + /// @brief Current camera-to-target distance. + float distance = 0.f; + /// @brief Lowest distance that remains valid for the current camera. + float minDistance = 0.f; + /// @brief Highest distance that remains valid for the current camera, or infinity when unbounded. + float maxDistance = SCameraTargetRelativeTraits::DefaultMaxDistance; + }; + + /// @brief Typed perspective state reported by cameras with derived FOV behavior. + struct DynamicPerspectiveState + { + /// @brief Authored reference FOV in degrees. + float baseFov = 0.f; + /// @brief Distance at which `baseFov` should be preserved. + float referenceDistance = 0.f; + }; + + /// @brief Limits constraining reusable `PathState` coordinates for `Path Rig` cameras. + /// + /// These limits are part of the typed path-model surface. They are not + /// global engine rules. A concrete `Path Rig` instance may expose an + /// unbounded `maxDistance` by returning infinity. + struct PathStateLimits + { + /// @brief Minimal valid `u` coordinate after path-state sanitization. + double minU = static_cast(SCameraTargetRelativeTraits::MinDistance); + /// @brief Minimal valid radial distance derived from the `(u, v)` pair. 
+ hlsl::float64_t minDistance = static_cast(SCameraTargetRelativeTraits::MinDistance); + /// @brief Maximal valid radial distance derived from the `(u, v)` pair, or infinity when unbounded. + hlsl::float64_t maxDistance = static_cast(SCameraTargetRelativeTraits::DefaultMaxDistance); + }; + + /// @brief Parametric path-rig state used by the `Path Rig` camera kind. + /// + /// The built-in path model interprets `(s, u, v, roll)` as path progress, + /// lateral shape coordinates, and roll around the local forward axis. + /// Other path models may map the same coordinates onto different geometry. + struct PathState + { + /// @brief Primary path-progress coordinate interpreted by the active path model. + double s = 0.0; + /// @brief First lateral/shape coordinate interpreted by the active path model. + double u = 0.0; + /// @brief Second lateral/shape coordinate interpreted by the active path model. + double v = 0.0; + /// @brief Roll around the path-model forward axis, expressed in radians. + double roll = 0.0; + + /// @brief Pack the state into one four-component vector. + inline hlsl::float64_t4 asVector() const + { + return hlsl::float64_t4(s, u, v, roll); + } + + /// @brief Project the state onto the translation-style representation used by replay helpers. + inline hlsl::float64_t3 asTranslationVector() const + { + return hlsl::float64_t3(u, v, s); + } + + /// @brief Rebuild one path state from the packed vector representation. + static inline PathState fromVector(const hlsl::float64_t4& value) + { + return { + .s = value.x, + .u = value.y, + .v = value.z, + .roll = value.w + }; + } + + /// @brief Rebuild one path state from the translation-style helper representation. + static inline PathState fromTranslationVector(const hlsl::float64_t3& value, const double pathRoll = 0.0) + { + return { + .s = value.z, + .u = value.x, + .v = value.y, + .roll = pathRoll + }; + } + }; + + /// @brief Gimbal that stores the runtime camera pose and cached world-to-view transform. 
+ /// + /// Camera implementations own one `CGimbal` instance and update it after + /// applying their internal state model. The gimbal stores world-space + /// position, orientation, and the cached view matrix derived from them. + class CGimbal : public IGimbal + { + public: + using base_t = IGimbal; + using model_matrix_t = typename base_t::model_matrix_t; + + CGimbal(typename base_t::SCreationParameters parameters) : base_t(std::move(parameters)) { updateView(); } + ~CGimbal() = default; + + inline void begin() { base_t::begin(); } + inline void setPosition(const hlsl::float64_t3& position) { base_t::setPosition(position); } + inline void setScale(const hlsl::float64_t3& scale) { base_t::setScale(scale); } + inline void setOrientation(const hlsl::camera_quaternion_t& orientation) { base_t::setOrientation(orientation); } + inline void transform(const CReferenceTransform& reference, const typename base_t::VirtualImpulse& impulse) { base_t::transform(reference, impulse); } + inline void rotate(const hlsl::float64_t3& axis, float dRadians) { base_t::rotate(axis, dRadians); } + inline void move(hlsl::float64_t3 delta) { base_t::move(delta); } + inline void strafe(hlsl::float64_t distance) { base_t::strafe(distance); } + inline void climb(hlsl::float64_t distance) { base_t::climb(distance); } + inline void advance(hlsl::float64_t distance) { base_t::advance(distance); } + inline void end() { base_t::end(); } + + inline const hlsl::float64_t3& getPosition() const { return base_t::getPosition(); } + inline const hlsl::camera_quaternion_t& getOrientation() const { return base_t::getOrientation(); } + inline const hlsl::float64_t3& getScale() const { return base_t::getScale(); } + inline const hlsl::matrix& getOrthonornalMatrix() const { return base_t::getOrthonornalMatrix(); } + inline const hlsl::float64_t3& getXAxis() const { return base_t::getXAxis(); } + inline const hlsl::float64_t3& getYAxis() const { return base_t::getYAxis(); } + inline const hlsl::float64_t3& 
getZAxis() const { return base_t::getZAxis(); } + inline hlsl::float64_t3 getLocalTarget() const { return base_t::getLocalTarget(); } + inline hlsl::float64_t3 getWorldTarget() const { return base_t::getWorldTarget(); } + inline const size_t& getManipulationCounter() const { return base_t::getManipulationCounter(); } + inline bool isManipulating() const { return base_t::isManipulating(); } + inline bool extractReferenceTransform(CReferenceTransform* out, const hlsl::float64_t4x4* referenceFrame = nullptr) const + { + return base_t::extractReferenceTransform(out, referenceFrame); + } + + template + inline typename base_t::VirtualImpulse accumulate(std::span virtualEvents) + { + return base_t::template accumulate(virtualEvents); + } + + /// @brief Rebuild the cached left-handed world-to-view matrix from the current gimbal pose. + inline void updateView() + { + const auto& gRight = this->getXAxis(); + const auto& gUp = this->getYAxis(); + const auto& gForward = this->getZAxis(); + + assert(hlsl::CCameraMathUtilities::isOrthoBase(gRight, gUp, gForward)); + + const auto& position = this->getPosition(); + + m_viewMatrix[0u] = hlsl::float64_t4(gRight, -hlsl::dot(gRight, position)); + m_viewMatrix[1u] = hlsl::float64_t4(gUp, -hlsl::dot(gUp, position)); + m_viewMatrix[2u] = hlsl::float64_t4(gForward, -hlsl::dot(gForward, position)); + } + + /// @brief Return the cached left-handed world-to-view matrix derived from the current pose. + inline const hlsl::float64_t3x4& getViewMatrix() const { return getViewMatrixLH(); } + + /// @brief Return the cached left-handed world-to-view matrix derived from the current pose. + inline const hlsl::float64_t3x4& getViewMatrixLH() const { return m_viewMatrix; } + + /// @brief Return the right-handed world-to-view matrix derived from the current pose. 
+ inline hlsl::float64_t3x4 getViewMatrixRH() const + { + auto rhViewMatrix = m_viewMatrix; + rhViewMatrix[2u] *= -1.0; + return rhViewMatrix; + } + + private: + hlsl::float64_t3x4 m_viewMatrix; + }; + + class SScopedMotionScaleOverride + { + public: + /// @brief Temporarily override both motion scales and restore the previous values on destruction. + SScopedMotionScaleOverride(ICamera* camera, const double moveScale, const double rotationScale) + : m_camera(camera) + { + if (!m_camera) + return; + + m_prevMoveScale = m_camera->getMoveSpeedScale(); + m_prevRotationScale = m_camera->getRotationSpeedScale(); + m_camera->setMotionScales(moveScale, rotationScale); + } + + SScopedMotionScaleOverride(const SScopedMotionScaleOverride&) = delete; + SScopedMotionScaleOverride& operator=(const SScopedMotionScaleOverride&) = delete; + + SScopedMotionScaleOverride(SScopedMotionScaleOverride&& other) noexcept + : m_camera(std::exchange(other.m_camera, nullptr)), + m_prevMoveScale(other.m_prevMoveScale), + m_prevRotationScale(other.m_prevRotationScale) + { + } + + SScopedMotionScaleOverride& operator=(SScopedMotionScaleOverride&& other) = delete; + + ~SScopedMotionScaleOverride() + { + if (m_camera) + m_camera->setMotionScales(m_prevMoveScale, m_prevRotationScale); + } + + private: + ICamera* m_camera = nullptr; + double m_prevMoveScale = 0.0; + double m_prevRotationScale = 0.0; + }; + + ICamera() {} + virtual ~ICamera() = default; + + /// @brief Return the mutable gimbal backing the runtime camera pose. + virtual const CGimbal& getGimbal() = 0u; + + /// @brief Apply one frame of semantic virtual events and an optional rigid reference-frame anchor. + /// + /// `virtualEvents` stores one frame of semantic movement, rotation, and + /// scale commands. Translation commands use `Move*`, rotation commands use + /// `Tilt*`, `Pan*`, and `Roll*`, and scale commands use `Scale*`. Cameras + /// interpret only the subset advertised by `getAllowedVirtualEvents()`. 
+ /// + /// `referenceFrame` is an optional rigid world-space transform used as the + /// anchor for this manipulation step. Free-like cameras may apply it + /// directly as pose input. Constrained cameras may first resolve it into + /// their own typed legal state and then apply event deltas in that state + /// space. + virtual bool manipulate(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) = 0; + /// @brief Apply one frame of virtual events while temporarily overriding the camera-local motion scales. + inline bool manipulateWithMotionScales(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame, const double moveScale, const double rotationScale) + { + auto scopedOverride = overrideMotionScales(moveScale, rotationScale); + return manipulate(virtualEvents, referenceFrame); + } + /// @brief Apply one frame of virtual events with unit translation and rotation scales. + inline bool manipulateWithUnitMotionScales(std::span virtualEvents, const hlsl::float64_t4x4* referenceFrame = nullptr) + { + return manipulateWithMotionScales(virtualEvents, referenceFrame, 1.0, 1.0); + } + + /// @brief Return the semantic virtual-event mask accepted by this camera kind. + /// + /// Input binders, scripted replay, and restore helpers use this mask to + /// decide which `CVirtualGimbalEvent` categories may be passed to + /// `manipulate(...)`. + virtual uint32_t getAllowedVirtualEvents() const = 0u; + + /// @brief Return the stable camera-family identifier for this concrete runtime camera. + virtual CameraKind getKind() const = 0; + /// @brief Return the optional typed capabilities exposed by this camera implementation. + virtual uint32_t getCapabilities() const { return None; } + /// @brief Return the typed goal-state fragments that helper layers may safely use with this camera. 
+ virtual uint32_t getGoalStateMask() const + { + goal_state_flags_t mask = GoalStateNone; + if (hasCapability(SphericalTarget)) + mask |= GoalStateSphericalTarget; + if (hasCapability(DynamicPerspectiveFov)) + mask |= GoalStateDynamicPerspective; + return static_cast(mask.value); + } + + /// @brief Return the stable human-readable identifier for this concrete camera instance. + virtual std::string_view getIdentifier() const = 0u; + + /// @brief Check whether the camera exposes the requested optional capability. + inline bool hasCapability(CameraCapability capability) const + { + return capability_flags_t(getCapabilities()).hasFlags(capability); + } + + /// @brief Check whether the camera can exchange the requested typed goal-state fragment. + inline bool supportsGoalState(GoalStateMask goalState) const + { + return goal_state_flags_t(getGoalStateMask()).hasFlags(goalState); + } + + /// @brief Query the current spherical-target state when the camera exposes it. + virtual bool tryGetSphericalTargetState(SphericalTargetState& out) const + { + return false; + } + + /// @brief Replace only the tracked target position for spherical-target cameras. + virtual bool trySetSphericalTarget(const hlsl::float64_t3& target) + { + return false; + } + + /// @brief Replace only the tracked target distance for spherical-target cameras. + virtual bool trySetSphericalDistance(float distance) + { + return false; + } + + /// @brief Query the current derived dynamic perspective FOV when the camera exposes it. + virtual bool tryGetDynamicPerspectiveFov(float& outFov) const + { + return false; + } + + /// @brief Query the current authored dynamic perspective state when the camera exposes it. + virtual bool tryGetDynamicPerspectiveState(DynamicPerspectiveState& out) const + { + return false; + } + + /// @brief Replace the authored dynamic perspective state when the camera exposes it. 
+ virtual bool trySetDynamicPerspectiveState(const DynamicPerspectiveState& state) + { + return false; + } + + /// @brief Query the current typed path state when the camera exposes it. + virtual bool tryGetPathState(PathState& out) const + { + return false; + } + + /// @brief Query the active typed limits constraining the current path state. + virtual bool tryGetPathStateLimits(PathStateLimits& out) const + { + return false; + } + + /// @brief Replace the current typed path state when the camera exposes it. + virtual bool trySetPathState(const PathState& state) + { + return false; + } + + /// @brief Update only the translation motion scale used by the camera runtime. + inline void setMoveSpeedScale(double scalar) + { + m_motionConfig.moveSpeedScale = scalar; + } + + /// @brief Update only the rotation motion scale used by the camera runtime. + inline void setRotationSpeedScale(double scalar) + { + m_motionConfig.rotationSpeedScale = scalar; + } + + /// @brief Update both translation and rotation motion scales at once. + inline void setMotionScales(const double moveScale, const double rotationScale) + { + setMoveSpeedScale(moveScale); + setRotationSpeedScale(rotationScale); + } + + /// @brief Return the current translation motion scale. + inline double getMoveSpeedScale() const { return m_motionConfig.moveSpeedScale; } + /// @brief Return the current rotation motion scale. + inline double getRotationSpeedScale() const { return m_motionConfig.rotationSpeedScale; } + /// @brief Return the full motion-scale bundle. + inline const SMotionConfig& getMotionConfig() const { return m_motionConfig; } + /// @brief Return the effective world-space translation represented by a unit virtual move event. + inline double getScaledVirtualTranslationMagnitude() const + { + return getUnscaledVirtualTranslationMagnitude() * getMoveSpeedScale(); + } + /// @brief Return the raw translation magnitude before applying the camera-local move scale. 
+ inline double getUnscaledVirtualTranslationMagnitude() const + { + return VirtualTranslationUnit; + } + /// @brief Scale one scalar translation magnitude through the active move scale. + inline double scaleVirtualTranslation(const double magnitude) const + { + return magnitude * getScaledVirtualTranslationMagnitude(); + } + /// @brief Scale one translation vector through the active move scale. + template + inline hlsl::camera_vector_t scaleVirtualTranslation(const hlsl::camera_vector_t& magnitude) const + { + return magnitude * static_cast(getScaledVirtualTranslationMagnitude()); + } + /// @brief Scale one scalar translation magnitude without applying the camera-local move scale. + inline double scaleUnscaledVirtualTranslation(const double magnitude) const + { + return magnitude * getUnscaledVirtualTranslationMagnitude(); + } + /// @brief Scale one translation vector without applying the camera-local move scale. + template + inline hlsl::camera_vector_t scaleUnscaledVirtualTranslation(const hlsl::camera_vector_t& magnitude) const + { + return magnitude * static_cast(getUnscaledVirtualTranslationMagnitude()); + } + /// @brief Scale one scalar rotation magnitude through the active rotation scale. + inline double scaleVirtualRotation(const double magnitude) const + { + return magnitude * getRotationSpeedScale(); + } + /// @brief Scale one rotation vector through the active rotation scale. + template + inline hlsl::camera_vector_t scaleVirtualRotation(const hlsl::camera_vector_t& magnitude) const + { + return magnitude * static_cast(getRotationSpeedScale()); + } + /// @brief Create a scoped helper that restores the previous motion scales on destruction. 
+ inline SScopedMotionScaleOverride overrideMotionScales(const double moveScale, const double rotationScale) + { + return SScopedMotionScaleOverride(this, moveScale, rotationScale); + } + +protected: + SMotionConfig m_motionConfig; +}; + +} + +#endif // _I_CAMERA_HPP_ diff --git a/include/nbl/ext/Cameras/IGimbal.hpp b/include/nbl/ext/Cameras/IGimbal.hpp new file mode 100644 index 0000000000..96aea65f73 --- /dev/null +++ b/include/nbl/ext/Cameras/IGimbal.hpp @@ -0,0 +1,370 @@ +#ifndef _NBL_IGIMBAL_HPP_ +#define _NBL_IGIMBAL_HPP_ + +#include +#include +#include +#include + +#include "nbl/type_traits.h" +#include "CCameraMathUtilities.hpp" +#include "CVirtualGimbalEvent.hpp" + +namespace nbl::core +{ + /// @brief Optional rigid reference frame used to reinterpret a frame of semantic camera input. + /// + /// Some camera consumers replay authored input relative to an external frame + /// instead of the current camera pose. This bundle stores the rigid transform + /// and its orientation in a form ready for `IGimbal::transform(...)`. + struct CReferenceTransform + { + hlsl::float64_t4x4 frame; + hlsl::camera_quaternion_t orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion(); + }; + + /// @brief Generic world-space gimbal used by runtime cameras and tracked targets. + /// + /// The gimbal stores position, orientation, scale, and an orthonormal local + /// basis. It also exposes `accumulate(...)`, which converts one batch of + /// semantic `CVirtualGimbalEvent` values into translation, rotation, and + /// scale impulses for a single manipulation step. + template + requires is_any_of_v + class IGimbal + { + public: + using precision_t = T; + using quaternion_t = hlsl::camera_quaternion_t; + template + using vector_t = hlsl::camera_vector_t; + /// @brief underlying type for world matrix (TRS) + using model_matrix_t = hlsl::matrix; + + /// @brief One frame of accumulated virtual translation, rotation, and scaling intent. 
+ struct VirtualImpulse + { + vector_t<3u> dVirtualTranslate { 0.0f }, dVirtualRotation { 0.0f }, dVirtualScale { 1.0f }; + }; + + /// @brief Accumulates one frame of virtual events into a translation/rotation/scale impulse. + template + VirtualImpulse accumulate(std::span virtualEvents, const vector_t<3u>& gRightOverride, const vector_t<3u>& gUpOverride, const vector_t<3u>& gForwardOverride) + { + VirtualImpulse impulse; + + for (const auto& event : virtualEvents) + { + assert(event.magnitude >= 0); + + // translation events + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveRight) + if (event.type == CVirtualGimbalEvent::MoveRight) + impulse.dVirtualTranslate.x += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveLeft) + if (event.type == CVirtualGimbalEvent::MoveLeft) + impulse.dVirtualTranslate.x -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveUp) + if (event.type == CVirtualGimbalEvent::MoveUp) + impulse.dVirtualTranslate.y += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveDown) + if (event.type == CVirtualGimbalEvent::MoveDown) + impulse.dVirtualTranslate.y -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveForward) + if (event.type == CVirtualGimbalEvent::MoveForward) + impulse.dVirtualTranslate.z += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveBackward) + if (event.type == CVirtualGimbalEvent::MoveBackward) + impulse.dVirtualTranslate.z -= static_cast(event.magnitude); + + // rotation events + if constexpr (AllowedEvents & CVirtualGimbalEvent::TiltUp) + if (event.type == CVirtualGimbalEvent::TiltUp) + impulse.dVirtualRotation.x += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::TiltDown) + if (event.type == CVirtualGimbalEvent::TiltDown) + impulse.dVirtualRotation.x -= static_cast(event.magnitude); + + if 
constexpr (AllowedEvents & CVirtualGimbalEvent::PanRight) + if (event.type == CVirtualGimbalEvent::PanRight) + impulse.dVirtualRotation.y += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::PanLeft) + if (event.type == CVirtualGimbalEvent::PanLeft) + impulse.dVirtualRotation.y -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::RollRight) + if (event.type == CVirtualGimbalEvent::RollRight) + impulse.dVirtualRotation.z += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::RollLeft) + if (event.type == CVirtualGimbalEvent::RollLeft) + impulse.dVirtualRotation.z -= static_cast(event.magnitude); + + // scaling events + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleXInc) + if (event.type == CVirtualGimbalEvent::ScaleXInc) + impulse.dVirtualScale.x *= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleXDec) + if (event.type == CVirtualGimbalEvent::ScaleXDec) + impulse.dVirtualScale.x *= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleYInc) + if (event.type == CVirtualGimbalEvent::ScaleYInc) + impulse.dVirtualScale.y *= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleYDec) + if (event.type == CVirtualGimbalEvent::ScaleYDec) + impulse.dVirtualScale.y *= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleZInc) + if (event.type == CVirtualGimbalEvent::ScaleZInc) + impulse.dVirtualScale.z *= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleZDec) + if (event.type == CVirtualGimbalEvent::ScaleZDec) + impulse.dVirtualScale.z *= static_cast(event.magnitude); + } + + return impulse; + } + + /// @brief Accumulate one frame of virtual events using the current gimbal basis as the reference frame. 
+ template + VirtualImpulse accumulate(std::span virtualEvents) + { + return accumulate(virtualEvents, getXAxis(), getYAxis(), getZAxis()); + } + + /// @brief Construction-time pose for one gimbal instance. + struct SCreationParameters + { + vector_t<3u> position; + quaternion_t orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion(); + }; + + IGimbal(const IGimbal&) = default; + IGimbal(IGimbal&&) noexcept = default; + IGimbal& operator=(const IGimbal&) = default; + IGimbal& operator=(IGimbal&&) noexcept = default; + + IGimbal(SCreationParameters&& parameters) + : m_position(parameters.position), m_orientation(parameters.orientation) + { + updateOrthonormalOrientationBase(); + } + + /// @brief Enter manipulation mode and reset the per-frame manipulation counter. + void begin() + { + m_isManipulating = true; + m_counter = 0u; + } + + /// @brief Replace the world-space position while the gimbal is in manipulation mode. + inline void setPosition(const vector_t<3u>& position) + { + assert(m_isManipulating); + + if (m_position != position) + m_counter++; + + m_position = position; + } + + /// @brief Replace the scale component stored by the gimbal. + inline void setScale(const vector_t<3u>& scale) + { + m_scale = scale; + } + + /// @brief Replace the orientation while keeping the orthonormal basis normalized. + inline void setOrientation(const quaternion_t& orientation) + { + assert(m_isManipulating); + + if (m_orientation.data != orientation.data) + m_counter++; + + m_orientation = hlsl::CCameraMathUtilities::normalizeQuaternion(orientation); + updateOrthonormalOrientationBase(); + } + + /// @brief Apply a prebuilt rigid reference transform and an accumulated impulse in one step. 
+ inline void transform(const CReferenceTransform& reference, const VirtualImpulse& impulse) + { + setOrientation(reference.orientation * hlsl::CCameraMathUtilities::makeQuaternionFromEulerRadiansYXZ(impulse.dVirtualRotation)); + setPosition( + hlsl::float64_t3(reference.frame[3]) + + hlsl::CCameraMathUtilities::rotateVectorByQuaternion(reference.orientation, hlsl::float64_t3(impulse.dVirtualTranslate)) + ); + } + + /// @brief Rotate the gimbal around a world-space axis by the requested angle in radians. + inline void rotate(const vector_t<3u>& axis, float dRadians) + { + assert(m_isManipulating); + + if(dRadians) + m_counter++; + + const auto dRotation = hlsl::CCameraMathUtilities::makeQuaternionFromAxisAngle(axis, static_cast(dRadians)); + m_orientation = hlsl::CCameraMathUtilities::normalizeQuaternion(dRotation * m_orientation); + updateOrthonormalOrientationBase(); + } + + /// @brief Translate the gimbal directly in world space. + inline void move(vector_t<3u> delta) + { + assert(m_isManipulating); + + auto newPosition = m_position + delta; + + if (newPosition != m_position) + m_counter++; + + m_position = newPosition; + } + + /// @brief Translate the gimbal along its local right axis. + inline void strafe(precision_t distance) + { + move(getXAxis() * distance); + } + + /// @brief Translate the gimbal along its local up axis. + inline void climb(precision_t distance) + { + move(getYAxis() * distance); + } + + /// @brief Translate the gimbal along its local forward axis. + inline void advance(precision_t distance) + { + move(getZAxis() * distance); + } + + /// @brief Leave manipulation mode after all pose updates for the current frame are finished. 
+ inline void end() + { + m_isManipulating = false; + } + + /// @brief Position of gimbal in world space + inline const vector_t<3u>& getPosition() const { return m_position; } + + /// @brief Orientation of gimbal + inline const quaternion_t& getOrientation() const { return m_orientation; } + + /// @brief Scale transform component + inline const vector_t<3u>& getScale() const { return m_scale; } + + /// @brief World matrix (TRS) + template + requires is_any_of_v> + const TRS operator()() const + { + const auto& position = getPosition(); + const auto& rotation = getOrthonornalMatrix(); + const auto& scale = getScale(); + + if constexpr (std::is_same_v) + { + return + { + hlsl::camera_vector_t(rotation[0] * scale.x, position.x), + hlsl::camera_vector_t(rotation[1] * scale.y, position.y), + hlsl::camera_vector_t(rotation[2] * scale.z, position.z) + }; + } + else + { + return + { + hlsl::camera_vector_t(rotation[0] * scale.x, T(0)), + hlsl::camera_vector_t(rotation[1] * scale.y, T(0)), + hlsl::camera_vector_t(rotation[2] * scale.z, T(0)), + hlsl::camera_vector_t(position, T(1)) + }; + } + } + + /// @brief Orthonormal [getXAxis(), getYAxis(), getZAxis()] orientation matrix + inline const hlsl::matrix& getOrthonornalMatrix() const { return m_orthonormal; } + + /// @brief Base "right" vector in orthonormal orientation basis (X-axis) + inline const vector_t<3u>& getXAxis() const { return m_orthonormal[0u]; } + + /// @brief Base "up" vector in orthonormal orientation basis (Y-axis) + inline const vector_t<3u>& getYAxis() const { return m_orthonormal[1u]; } + + /// @brief Base "forward" vector in orthonormal orientation basis (Z-axis) + inline const vector_t<3u>& getZAxis() const { return m_orthonormal[2u]; } + + /// @brief Target vector in local space, alias for getZAxis() + inline vector_t<3u> getLocalTarget() const { return getZAxis(); } + + /// @brief Target vector in world space + inline vector_t<3u> getWorldTarget() const { return getPosition() + getLocalTarget(); } + 
+ /// @brief Counts how many times a valid manipulation has been performed, the counter resets when begin() is called + inline const size_t& getManipulationCounter() const { return m_counter; } + + /// @brief Returns true if gimbal records a manipulation + inline bool isManipulating() const { return m_isManipulating; } + + /// @brief Build a rigid reference transform either from an external frame or from the current gimbal pose. + bool extractReferenceTransform(CReferenceTransform* out, const hlsl::float64_t4x4* referenceFrame = nullptr) const + { + if (not out) + return false; + + if (referenceFrame) + { + if (!hlsl::CCameraMathUtilities::tryBuildRigidFrameFromTransform(*referenceFrame, out->frame, out->orientation)) + return false; + } + else + { + out->orientation = getOrientation(); + out->frame = hlsl::CCameraMathUtilities::composeTransformMatrix(getPosition(), out->orientation); + } + + return true; + } + + private: + inline void updateOrthonormalOrientationBase() + { + m_orthonormal = hlsl::CCameraMathUtilities::getQuaternionBasisMatrix(m_orientation); + } + + /// @brief Position of a gimbal in world space + vector_t<3u> m_position; + + /// @brief Normalized orientation of gimbal + quaternion_t m_orientation; + + /// @brief Scale transform component + vector_t<3u> m_scale = { 1.f, 1.f , 1.f }; + + /// @brief Orthonormal basis reconstructed from the current orientation. 
+ hlsl::matrix m_orthonormal; + + /// @brief Counter that increments for each performed manipulation, resets with each begin() call + size_t m_counter = {}; + + /// @brief Tracks whether gimbal is currently in manipulation mode + bool m_isManipulating = false; + + }; +} // namespace nbl::core + +#endif // _NBL_IGIMBAL_HPP_ diff --git a/include/nbl/ext/Cameras/IGimbalBindingLayout.hpp b/include/nbl/ext/Cameras/IGimbalBindingLayout.hpp new file mode 100644 index 0000000000..c1b09427bb --- /dev/null +++ b/include/nbl/ext/Cameras/IGimbalBindingLayout.hpp @@ -0,0 +1,131 @@ +#ifndef _NBL_I_GIMBAL_BINDING_LAYOUT_HPP_ +#define _NBL_I_GIMBAL_BINDING_LAYOUT_HPP_ + +#include +#include + +#include "CVirtualGimbalEvent.hpp" +#include "nbl/ui/KeyCodes.h" + +namespace nbl::ui +{ + +/// @brief Static mapping from external input domains to virtual gimbal events. +/// +/// This type stores binding layout only. It does not process runtime input. +/// Each binding chooses both the semantic virtual event type and the gain used +/// to convert raw producer values into `CVirtualGimbalEvent::magnitude`. 
+struct IGimbalBindingLayout +{ + IGimbalBindingLayout() {} + virtual ~IGimbalBindingLayout() {} + + using gimbal_event_t = core::CVirtualGimbalEvent; + using encode_keyboard_code_t = ui::E_KEY_CODE; + using encode_mouse_code_t = ui::E_MOUSE_CODE; + using encode_imguizmo_code_t = gimbal_event_t::VirtualEventType; + + enum BindingDomain : uint8_t + { + Keyboard, + Mouse, + Imguizmo, + + Count + }; + + struct CKeyInfo + { + union + { + encode_keyboard_code_t keyboardCode; + encode_mouse_code_t mouseCode; + encode_imguizmo_code_t imguizmoCode; + }; + + CKeyInfo(encode_keyboard_code_t code) : keyboardCode(code), type(Keyboard) {} + CKeyInfo(encode_mouse_code_t code) : mouseCode(code), type(Mouse) {} + CKeyInfo(encode_imguizmo_code_t code) : imguizmoCode(code), type(Imguizmo) {} + + BindingDomain type; + }; + + struct CHashInfo + { + static inline constexpr double DefaultMagnitudeScale = 1.0; + + CHashInfo() {} + CHashInfo(gimbal_event_t::VirtualEventType _type, const double _magnitudeScale = DefaultMagnitudeScale) + : event({ .type = _type }), magnitudeScale(_magnitudeScale) {} + ~CHashInfo() = default; + + /// @brief Virtual event emitted by this binding. + gimbal_event_t event = {}; + /// @brief Per-binding gain applied when raw input is converted into one virtual-event magnitude. + double magnitudeScale = DefaultMagnitudeScale; + /// @brief Runtime latch used by held keyboard and mouse-button bindings. 
+ bool active = false; + }; + + using keyboard_to_virtual_events_t = std::unordered_map; + using mouse_to_virtual_events_t = std::unordered_map; + using imguizmo_to_virtual_events_t = std::unordered_map; + + virtual const keyboard_to_virtual_events_t& getKeyboardVirtualEventMap() const = 0; + virtual const mouse_to_virtual_events_t& getMouseVirtualEventMap() const = 0; + virtual const imguizmo_to_virtual_events_t& getImguizmoVirtualEventMap() const = 0; + + virtual void updateKeyboardMapping(const std::function& mapKeys) = 0; + virtual void updateMouseMapping(const std::function& mapKeys) = 0; + virtual void updateImguizmoMapping(const std::function& mapKeys) = 0; + + inline void copyBindingLayoutFrom(const IGimbalBindingLayout& layout) + { + updateKeyboardMapping([&](auto& map) { map = sanitizeMapping(layout.getKeyboardVirtualEventMap()); }); + updateMouseMapping([&](auto& map) { map = sanitizeMapping(layout.getMouseVirtualEventMap()); }); + updateImguizmoMapping([&](auto& map) { map = sanitizeMapping(layout.getImguizmoVirtualEventMap()); }); + } + + inline void copyBindingLayoutTo(IGimbalBindingLayout& layout) const + { + layout.updateKeyboardMapping([&](auto& map) { map = sanitizeMapping(getKeyboardVirtualEventMap()); }); + layout.updateMouseMapping([&](auto& map) { map = sanitizeMapping(getMouseVirtualEventMap()); }); + layout.updateImguizmoMapping([&](auto& map) { map = sanitizeMapping(getImguizmoVirtualEventMap()); }); + } + +protected: + template + inline static Map sanitizeMapping(const Map& source) + { + Map result; + for (const auto& [code, hash] : source) + result.emplace(code, typename Map::mapped_type(hash.event.type, hash.magnitudeScale)); + return result; + } +}; + +class CGimbalBindingLayoutStorage : public IGimbalBindingLayout +{ +public: + /// @brief Mutable storage for active or preset binding layout. 
+ using IGimbalBindingLayout::IGimbalBindingLayout; + + CGimbalBindingLayoutStorage() {} + virtual ~CGimbalBindingLayoutStorage() {} + + virtual void updateKeyboardMapping(const std::function& mapKeys) override { mapKeys(m_keyboardVirtualEventMap); } + virtual void updateMouseMapping(const std::function& mapKeys) override { mapKeys(m_mouseVirtualEventMap); } + virtual void updateImguizmoMapping(const std::function& mapKeys) override { mapKeys(m_imguizmoVirtualEventMap); } + + virtual const keyboard_to_virtual_events_t& getKeyboardVirtualEventMap() const override { return m_keyboardVirtualEventMap; } + virtual const mouse_to_virtual_events_t& getMouseVirtualEventMap() const override { return m_mouseVirtualEventMap; } + virtual const imguizmo_to_virtual_events_t& getImguizmoVirtualEventMap() const override { return m_imguizmoVirtualEventMap; } + + keyboard_to_virtual_events_t m_keyboardVirtualEventMap; + mouse_to_virtual_events_t m_mouseVirtualEventMap; + imguizmo_to_virtual_events_t m_imguizmoVirtualEventMap; +}; + +} // namespace nbl::ui + +#endif diff --git a/include/nbl/ext/Cameras/IGimbalInputProcessor.hpp b/include/nbl/ext/Cameras/IGimbalInputProcessor.hpp new file mode 100644 index 0000000000..f8e5265320 --- /dev/null +++ b/include/nbl/ext/Cameras/IGimbalInputProcessor.hpp @@ -0,0 +1,301 @@ +#ifndef _NBL_I_GIMBAL_INPUT_PROCESSOR_HPP_ +#define _NBL_I_GIMBAL_INPUT_PROCESSOR_HPP_ + +#include +#include + +#include "nbl/builtin/hlsl/tgmath.hlsl" +#include "nbl/ui/KeyCodes.h" +#include "nbl/ui/SInputEvent.h" + +#include "IGimbalBindingLayout.hpp" + +namespace nbl::ui +{ + +/// @brief Runtime processor that turns keyboard, mouse, and ImGuizmo input into virtual events. +/// +/// Held keyboard and mouse-button bindings emit `frameDeltaSeconds * magnitudeScale`. +/// Relative mouse movement, mouse scroll, and ImGuizmo deltas emit +/// `abs(rawDelta) * magnitudeScale` per bound axis. The result is written into +/// `CVirtualGimbalEvent::magnitude`. 
+class IGimbalInputProcessor : public CGimbalBindingLayoutStorage +{ +public: + struct SInputProcessorDefaults final + { + /// @brief Largest frame interval, in seconds, accepted from held-input accumulation. + static inline constexpr double MaxFrameDeltaSeconds = 0.2; + static inline constexpr float ZeroPivot = 0.0f; + static inline constexpr float UnitPivot = 1.0f; + }; + static inline constexpr double MaxFrameDeltaSeconds = SInputProcessorDefaults::MaxFrameDeltaSeconds; + static inline constexpr float ZeroPivot = SInputProcessorDefaults::ZeroPivot; + static inline constexpr float UnitPivot = SInputProcessorDefaults::UnitPivot; + + using CGimbalBindingLayoutStorage::CGimbalBindingLayoutStorage; + + IGimbalInputProcessor() = default; + virtual ~IGimbalInputProcessor() = default; + + /// @brief Keyboard events consumed by the processor. + using input_keyboard_event_t = ui::SKeyboardEvent; + + /// @brief Mouse events consumed by the processor. + using input_mouse_event_t = ui::SMouseEvent; + + /// @brief ImGuizmo world-space delta transforms consumed by the processor. + using input_imguizmo_event_t = hlsl::float32_t4x4; + + void beginInputProcessing(const std::chrono::microseconds nextPresentationTimeStamp); + + void endInputProcessing(); + + struct SUpdateParameters + { + std::span keyboardEvents = {}; + std::span mouseEvents = {}; + std::span imguizmoEvents = {}; + }; + + /// @brief Process combined events from `SUpdateParameters` into virtual manipulation events. + /// + /// @note This function combines keyboard, mouse, and ImGuizmo processing. + /// It delegates the actual work to `processKeyboard`, `processMouse`, and + /// `processImguizmo`, then accumulates their output and total count. + /// + /// @param output Pointer to the destination array for generated gimbal events. + /// Pass `nullptr` to query only the total event count. + /// @param count Output total number of generated gimbal events. 
+ /// @param parameters Individual keyboard, mouse, and ImGuizmo input spans. + void process(gimbal_event_t* output, uint32_t& count, const SUpdateParameters parameters = {}); + + /// @brief Process keyboard events into virtual manipulation events. + /// + /// @note This function maps keyboard press and release events into virtual + /// gimbal manipulation events through the active keyboard bindings. + /// Held keys contribute elapsed seconds scaled by the binding gain. + /// + /// @param output Pointer to the destination array for generated gimbal events. + /// Pass `nullptr` to query only the total event count. + /// @param count Output number of generated gimbal events. + /// @param events Keyboard events to process. + void processKeyboard(gimbal_event_t* output, uint32_t& count, std::span events); + + /// @brief Process mouse events into virtual manipulation events. + /// + /// @note This function maps mouse clicks, scrolls, and movements into + /// virtual gimbal manipulation events through the active mouse bindings. + /// Relative movement and scroll contribute absolute signed deltas scaled by + /// the matching binding gain. + /// + /// @param output Pointer to the destination array for generated gimbal events. + /// Pass `nullptr` to query only the total event count. + /// @param count Output number of generated gimbal events. + /// @param events Mouse events to process. + void processMouse(gimbal_event_t* output, uint32_t& count, std::span events); + + /// @brief Process ImGuizmo transforms into virtual gimbal events. + /// + /// @note This function converts world-space delta transforms authored by + /// ImGuizmo into translation, rotation, and scale virtual events. + /// Translation uses world-space delta components. Rotation uses extracted + /// Euler radians. Scale uses multiplicative components around pivot `1`. + /// + /// @param output Pointer to the destination array for generated gimbal events. 
+ /// Pass `nullptr` to query only the total event count. + /// @param count Output number of generated gimbal events. + /// @param events ImGuizmo delta transforms to process. + void processImguizmo(gimbal_event_t* output, uint32_t& count, std::span events); + +private: + template + struct SEncodedAxisBindingGroup final + { + std::array positive = {}; + std::array negative = {}; + }; + + struct SInputProcessorBindingGroups final + { + static inline constexpr SEncodedAxisBindingGroup MouseScroll = { + .positive = { + ui::EMC_VERTICAL_POSITIVE_SCROLL, + ui::EMC_HORIZONTAL_POSITIVE_SCROLL + }, + .negative = { + ui::EMC_VERTICAL_NEGATIVE_SCROLL, + ui::EMC_HORIZONTAL_NEGATIVE_SCROLL + } + }; + + static inline constexpr SEncodedAxisBindingGroup MouseRelativeMovement = { + .positive = { + ui::EMC_RELATIVE_POSITIVE_MOVEMENT_X, + ui::EMC_RELATIVE_POSITIVE_MOVEMENT_Y + }, + .negative = { + ui::EMC_RELATIVE_NEGATIVE_MOVEMENT_X, + ui::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y + } + }; + + static inline constexpr SEncodedAxisBindingGroup ImguizmoTranslation = { + .positive = { + gimbal_event_t::MoveRight, + gimbal_event_t::MoveUp, + gimbal_event_t::MoveForward + }, + .negative = { + gimbal_event_t::MoveLeft, + gimbal_event_t::MoveDown, + gimbal_event_t::MoveBackward + } + }; + + static inline constexpr SEncodedAxisBindingGroup ImguizmoRotation = { + .positive = { + gimbal_event_t::TiltUp, + gimbal_event_t::PanRight, + gimbal_event_t::RollRight + }, + .negative = { + gimbal_event_t::TiltDown, + gimbal_event_t::PanLeft, + gimbal_event_t::RollLeft + } + }; + + static inline constexpr SEncodedAxisBindingGroup ImguizmoScale = { + .positive = { + gimbal_event_t::ScaleXInc, + gimbal_event_t::ScaleYInc, + gimbal_event_t::ScaleZInc + }, + .negative = { + gimbal_event_t::ScaleXDec, + gimbal_event_t::ScaleYDec, + gimbal_event_t::ScaleZDec + } + }; + }; + + static double clampFrameDeltaTimeSeconds( + const std::chrono::microseconds nextPresentationTimeStamp, + const std::chrono::microseconds 
lastVirtualUpTimeStamp); + + template + void processBindingMap(Map& map, gimbal_event_t* output, uint32_t& count, ConsumeFn&& consume) + { + count = 0u; + const auto mappedVirtualEventsCount = static_cast(map.size()); + if (!output) + { + count = mappedVirtualEventsCount; + return; + } + if (!mappedVirtualEventsCount) + return; + + preprocess(map); + consume(map); + postprocess(map, output, count); + } + + static bool tryGetMouseButtonCode( + const ui::E_MOUSE_BUTTON button, + ui::E_MOUSE_CODE& outCode); + + template + void updateMouseButtonState(Map& map, const input_mouse_event_t::SClickEvent& clickEvent) + { + ui::E_MOUSE_CODE mouseCode = ui::EMC_NONE; + if (!tryGetMouseButtonCode(clickEvent.mouseButton, mouseCode)) + return; + + if (clickEvent.action == input_mouse_event_t::SClickEvent::EA_PRESSED) + setBindingActiveState(map, mouseCode, true); + else if (clickEvent.action == input_mouse_event_t::SClickEvent::EA_RELEASED) + setBindingActiveState(map, mouseCode, false); + } + + template + void setBindingActiveState(Map& map, const Code code, const bool active) + { + const auto request = map.find(code); + if (request == map.end()) + return; + + request->second.active = active; + } + + void preprocess(auto& map) + { + for (auto& [key, hash] : map) + { + hash.event.magnitude = 0.0f; + + if (hash.active) + hash.event.magnitude = m_frameDeltaSeconds * hash.magnitudeScale; + } + } + + void postprocess(const auto& map, gimbal_event_t* output, uint32_t& count) + { + for (const auto& [key, hash] : map) + if (hash.event.magnitude) + { + auto* virtualEvent = output + count; + virtualEvent->type = hash.event.type; + virtualEvent->magnitude = hash.event.magnitude; + ++count; + } + } + + template + void requestMagnitudeUpdateWithScalar(float signPivot, float dScalar, EncodeType positive, EncodeType negative, Map& map) + { + if (dScalar != signPivot) + { + const auto dMagnitude = hlsl::abs(dScalar); + auto code = (dScalar > signPivot) ? 
positive : negative; + auto request = map.find(code); + if (request != map.end()) + request->second.event.magnitude += dMagnitude * request->second.magnitudeScale; + } + } + + template + void requestMagnitudeUpdateWithSignedComponents( + float signPivot, + const hlsl::vector& components, + const std::array& positive, + const std::array& negative, + Map& map) + { + for (uint32_t i = 0u; i < N; ++i) + requestMagnitudeUpdateWithScalar(signPivot, components[i], positive[i], negative[i], map); + } + + template + void requestMagnitudeUpdateWithSignedComponents( + float signPivot, + const hlsl::vector& components, + const SEncodedAxisBindingGroup& bindings, + Map& map) + { + requestMagnitudeUpdateWithSignedComponents( + signPivot, + components, + bindings.positive, + bindings.negative, + map); + } + + double m_frameDeltaSeconds = {}; + std::chrono::microseconds m_nextPresentationTimeStamp = {}, m_lastVirtualUpTimeStamp = {}; +}; + +} // namespace nbl::ui + +#endif // _NBL_I_GIMBAL_INPUT_PROCESSOR_HPP_ diff --git a/include/nbl/ext/Cameras/ILinearProjection.hpp b/include/nbl/ext/Cameras/ILinearProjection.hpp new file mode 100644 index 0000000000..c4c0ce2d51 --- /dev/null +++ b/include/nbl/ext/Cameras/ILinearProjection.hpp @@ -0,0 +1,128 @@ +#ifndef _NBL_I_LINEAR_PROJECTION_HPP_ +#define _NBL_I_LINEAR_PROJECTION_HPP_ + +#include + +#include "nbl/core/decl/smart_refctd_ptr.h" +#include "IProjection.hpp" +#include "ICamera.hpp" + +namespace nbl::core +{ + +/// @brief Interface for any custom linear projection transformation. +/// +/// Matrix elements are already evaluated scalars referencing a camera. +/// This covers perspective, orthographic, oblique, axonometric, and shear projections. 
+class ILinearProjection : virtual public core::IReferenceCounted +{ +protected: + ILinearProjection(core::smart_refctd_ptr&& camera) + : m_camera(std::move(camera)) {} + virtual ~ILinearProjection() = default; + + core::smart_refctd_ptr m_camera; +public: + /// @brief World transform type expected by the linear projection helpers. + using model_matrix_t = typename ICamera::CGimbal::model_matrix_t; + + /// @brief Matrix type used for fully concatenated linear transforms. + using concatenated_matrix_t = hlsl::float64_t4x4; + + /// @brief Optional inverse of a concatenated transform when the matrix is not singular. + using inv_concatenated_matrix_t = std::optional; + + /// @brief One concrete linear projection matrix together with cached inverse metadata. + struct CProjection : public IProjection + { + using IProjection::IProjection; + using projection_matrix_t = concatenated_matrix_t; + using inv_projection_matrix_t = inv_concatenated_matrix_t; + + CProjection(); + explicit CProjection(const projection_matrix_t& matrix); + + /// @brief Returns P (Projection matrix) + inline const projection_matrix_t& getProjectionMatrix() const { return m_projectionMatrix; } + + /// @brief Returns P⁻¹ (Inverse of Projection matrix) *if it exists* + inline const inv_projection_matrix_t& getInvProjectionMatrix() const { return m_invProjectionMatrix; } + + inline const std::optional& isProjectionLeftHanded() const { return m_isProjectionLeftHanded; } + inline bool isProjectionSingular() const { return m_isProjectionSingular; } + virtual ProjectionType getProjectionType() const override { return ProjectionType::Linear; } + + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const override + { + output = hlsl::mul(m_projectionMatrix, vecToProjectionSpace); + } + + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const override + { + if (m_isProjectionSingular) + return false; + + output 
= hlsl::mul(m_invProjectionMatrix.value(), vecFromProjectionSpace); + + return true; + } + + protected: + /// @brief Replace the projection matrix and rebuild cached handedness and inverse information. + void setProjectionMatrix(const projection_matrix_t& matrix); + + private: + projection_matrix_t m_projectionMatrix; + inv_projection_matrix_t m_invProjectionMatrix; + std::optional m_isProjectionLeftHanded; + bool m_isProjectionSingular; + }; + + /// @brief Return the number of linear projection entries owned by the concrete wrapper. + virtual uint32_t getLinearProjectionCount() const = 0; + /// @brief Return one linear projection entry by index. + virtual const CProjection& getLinearProjection(uint32_t index) const = 0; + + /// @brief Replace the camera referenced by this projection wrapper. + bool setCamera(core::smart_refctd_ptr&& camera); + + /// @brief Return the camera referenced by this projection wrapper. + ICamera* getCamera(); + + /// @brief Compute the model-view matrix. + /// + /// @param model World TRS matrix. + /// @return The model-view matrix. + concatenated_matrix_t getMV(const model_matrix_t& model) const; + + /// @brief Compute the model-view-projection matrix from a model matrix. + /// + /// @param projection Linear projection. + /// @param model World TRS matrix. + /// @return The model-view-projection matrix. + concatenated_matrix_t getMVP(const CProjection& projection, const model_matrix_t& model) const; + + /// @brief Compute the model-view-projection matrix from a model-view matrix. + /// + /// @param projection Linear projection. + /// @param mv Model-view matrix. + /// @return The model-view-projection matrix. + concatenated_matrix_t getMVP(const CProjection& projection, const concatenated_matrix_t& mv) const; + + /// @brief Compute the inverse model-view matrix. + /// + /// @param model World TRS matrix. + /// @return The inverse model-view matrix when it exists, otherwise `std::nullopt`. 
+ inv_concatenated_matrix_t getMVInverse(const model_matrix_t& model) const; + + /// @brief Compute the inverse model-view-projection matrix. + /// + /// @param projection Linear projection. + /// @param model World TRS matrix. + /// @return The inverse model-view-projection matrix when it exists, otherwise `std::nullopt`. + inv_concatenated_matrix_t getMVPInverse(const CProjection& projection, const model_matrix_t& model) const; +}; + +} // namespace nbl::core + +#endif // _NBL_I_LINEAR_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/IPerspectiveProjection.hpp b/include/nbl/ext/Cameras/IPerspectiveProjection.hpp new file mode 100644 index 0000000000..c8ee879c44 --- /dev/null +++ b/include/nbl/ext/Cameras/IPerspectiveProjection.hpp @@ -0,0 +1,64 @@ +#ifndef _NBL_I_QUAD_PROJECTION_HPP_ +#define _NBL_I_QUAD_PROJECTION_HPP_ + +#include "ILinearProjection.hpp" + +namespace nbl::core +{ + +/// @brief Interface for quad projections. +/// +/// This projection transforms a vector into the model space of a perspective +/// quad defined by the pre-transform matrix and then projects it onto the quad +/// using the linear viewport transform. +/// +/// A perspective quad projection is represented by: +/// - a pre-transform matrix +/// - a linear viewport transform matrix +/// +/// The final projection matrix is the concatenation of those two transforms. +/// +/// @note One perspective quad projection can represent a face quad of a CAVE-like system. +class IPerspectiveProjection : public ILinearProjection +{ +public: + /// @brief One quad projection entry described by a pretransform and a viewport projection. 
+ struct CProjection : ILinearProjection::CProjection + { + using base_t = ILinearProjection::CProjection; + + CProjection() = default; + CProjection(const ILinearProjection::model_matrix_t& pretransform, ILinearProjection::concatenated_matrix_t viewport) + { + setQuadTransform(pretransform, viewport); + } + + /// @brief Rebuild the concatenated quad projection from its authored components. + inline void setQuadTransform(const ILinearProjection::model_matrix_t& pretransform, ILinearProjection::concatenated_matrix_t viewport) + { + auto concatenated = hlsl::mul(hlsl::CCameraMathUtilities::promoteAffine3x4To4x4(pretransform), viewport); + base_t::setProjectionMatrix(concatenated); + + m_pretransform = pretransform; + m_viewport = viewport; + } + + /// @brief Return the authored pretransform applied before the viewport projection. + inline const ILinearProjection::model_matrix_t& getPretransform() const { return m_pretransform; } + /// @brief Return the authored viewport projection matrix stored for this quad. 
+ inline const ILinearProjection::concatenated_matrix_t& getViewportProjection() const { return m_viewport; } + + private: + ILinearProjection::model_matrix_t m_pretransform = ILinearProjection::model_matrix_t(1); + ILinearProjection::concatenated_matrix_t m_viewport = ILinearProjection::concatenated_matrix_t(1); + }; + +protected: + IPerspectiveProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(core::smart_refctd_ptr(camera)) {} + virtual ~IPerspectiveProjection() = default; +}; + +} // namespace nbl::core + +#endif // _NBL_I_QUAD_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/IPlanarProjection.hpp b/include/nbl/ext/Cameras/IPlanarProjection.hpp new file mode 100644 index 0000000000..459a91ad2e --- /dev/null +++ b/include/nbl/ext/Cameras/IPlanarProjection.hpp @@ -0,0 +1,103 @@ +#ifndef _NBL_I_PLANAR_PROJECTION_HPP_ +#define _NBL_I_PLANAR_PROJECTION_HPP_ + +#include "nbl/core/math/glslFunctions.h" +#include "nbl/builtin/hlsl/math/thin_lens_projection.hlsl" + +#include "IGimbalBindingLayout.hpp" +#include "ILinearProjection.hpp" + +namespace nbl::core +{ + +/// @brief Linear projection wrapper for one camera-facing planar viewport. +/// +/// The projection stores viewport-local binding layouts. Runtime input +/// processing is handled by `CGimbalInputBinder`. +class IPlanarProjection : public ILinearProjection +{ +public: + /// @brief One perspective or orthographic projection entry plus its viewport-local bindings. + struct CProjection : public ILinearProjection::CProjection + { + using base_t = ILinearProjection::CProjection; + + /// @brief Stable runtime classification of supported planar projection parameterizations. + enum ProjectionType : uint8_t + { + Perspective, + Orthographic, + + Count + }; + + template + static CProjection create(Args&&... 
args) + requires (T != Count) + { + CProjection output; + + if constexpr (T == Perspective) output.setPerspective(std::forward(args)...); + else if (T == Orthographic) output.setOrthographic(std::forward(args)...); + + return output; + } + + CProjection(const CProjection& other) = default; + CProjection(CProjection&& other) noexcept = default; + + /// @brief Authored parameter bundle stored by one planar projection entry. + struct ProjectionParameters + { + ProjectionType m_type; + + union PlanarParameters + { + struct + { + float fov; + } perspective; + + struct + { + float orthoWidth; + } orthographic; + + PlanarParameters() {} + ~PlanarParameters() {} + } m_planar; + + float m_zNear; + float m_zFar; + }; + + /// @brief Rebuild the concrete projection matrix from the stored parameters. + void update(bool leftHanded, float aspectRatio); + + /// @brief Switch the entry to perspective mode and store its authored parameters. + void setPerspective(float zNear = 0.1f, float zFar = 100.f, float fov = 60.f); + + /// @brief Switch the entry to orthographic mode and store its authored parameters. + void setOrthographic(float zNear = 0.1f, float zFar = 100.f, float orthoWidth = 10.f); + + /// @brief Return the authored planar projection parameters. + inline const ProjectionParameters& getParameters() const { return m_parameters; } + /// @brief Return the viewport-local input binding layout stored next to this projection entry. + inline const ui::IGimbalBindingLayout& getInputBinding() const { return m_inputBinding; } + /// @brief Return mutable access to the viewport-local input binding layout. 
+ inline ui::IGimbalBindingLayout& getInputBinding() { return m_inputBinding; } + private: + CProjection() = default; + ProjectionParameters m_parameters; + ui::CGimbalBindingLayoutStorage m_inputBinding; + }; + +protected: + IPlanarProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(std::move(camera)) {} + virtual ~IPlanarProjection() = default; +}; + +} // namespace nbl::core + +#endif // _NBL_I_PLANAR_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/IProjection.hpp b/include/nbl/ext/Cameras/IProjection.hpp new file mode 100644 index 0000000000..b42cb01a32 --- /dev/null +++ b/include/nbl/ext/Cameras/IProjection.hpp @@ -0,0 +1,71 @@ +#ifndef _NBL_I_PROJECTION_HPP_ +#define _NBL_I_PROJECTION_HPP_ + +#include + +namespace nbl::core +{ + +/// @brief Base interface for any reusable projection model in the camera stack. +/// +/// A projection transforms vectors between some input space and the projection +/// space understood by a concrete viewport or projection consumer. Specialized +/// interfaces such as `ILinearProjection`, `IPlanarProjection`, and +/// `IPerspectiveProjection` refine this abstraction with additional structure. +class IProjection +{ +public: + /// @brief Common vector type used by projection and unprojection operations. + using projection_vector_t = hlsl::float64_t4; + + /// @brief Stable runtime classification of supported projection families. 
+ enum class ProjectionType + { + /// @brief Any raw linear transformation, for example it may represent Perspective, Orthographic, Oblique, Axonometric, Shear projections + Linear, + + /// @brief Specialized linear projection for planar projections with parameters + Planar, + + /// @brief Extension of planar projection represented by pre-transform & planar transform combined projecting onto R3 cave quad + CaveQuad, + + /// @brief Specialized CaveQuad projection, represents planar projections onto cube with 6 quad cube faces + Cube, + + Spherical, + ThinLens, + + Count + }; + + IProjection() = default; + virtual ~IProjection() = default; + + /// @brief Transform a vector from its input space into projection space. + /// + /// @param vecToProjectionSpace Vector to transform into projection space. + /// @param output Result vector in projection space. + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const = 0; + + /// @brief Transform a vector from projection space back to the original space. + /// + /// The inverse transform may fail because the original projection may be singular. + /// + /// @param vecFromProjectionSpace Vector in projection space. + /// @param output Result vector in the original space. + /// @return `true` when the inverse transform succeeded, otherwise `false`. + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const = 0; + + /// @brief Return the specific projection family implemented by the concrete instance. + /// + /// Examples include linear, spherical, and thin-lens projections as defined + /// by `ProjectionType`. + /// + /// @return The type of this projection. 
+ virtual ProjectionType getProjectionType() const = 0; +}; + +} // namespace nbl::core + +#endif // _NBL_I_PROJECTION_HPP_ diff --git a/include/nbl/ext/Cameras/IRange.hpp b/include/nbl/ext/Cameras/IRange.hpp new file mode 100644 index 0000000000..b4084db2ae --- /dev/null +++ b/include/nbl/ext/Cameras/IRange.hpp @@ -0,0 +1,21 @@ +#ifndef _NBL_IRANGE_HPP_ +#define _NBL_IRANGE_HPP_ + +namespace nbl::core +{ + +/// @brief Minimal concepts used by camera persistence and tooling helpers. +template +concept GeneralPurposeRange = requires +{ + typename std::ranges::range_value_t; +}; + +template +concept ContiguousGeneralPurposeRangeOf = GeneralPurposeRange && +std::ranges::contiguous_range && +std::same_as, T>; + +} // namespace nbl::core + +#endif // _NBL_IRANGE_HPP_ diff --git a/include/nbl/ext/Cameras/README.md b/include/nbl/ext/Cameras/README.md new file mode 100644 index 0000000000..e207f84a99 --- /dev/null +++ b/include/nbl/ext/Cameras/README.md @@ -0,0 +1,997 @@ +# Shared Camera API + +This directory contains the reusable Nabla camera stack. + +The stack has two public faces: + +- a runtime face used to move cameras during one frame +- a typed face used to capture, store, restore, compare, replay, and validate camera state + +The runtime face is centered on [`ICamera.hpp`](ICamera.hpp). +The typed face is centered on [`CCameraGoal.hpp`](CCameraGoal.hpp) and [`CCameraGoalSolver.hpp`](CCameraGoalSolver.hpp). + +## TL;DR + +If you want to know which type to touch first, use this table. + +| I want to... 
| Use | +|---|---| +| move a camera from live input this frame | `ICamera::manipulate(...)` | +| convert keyboard or mouse input into camera commands | `IGimbalInputProcessor` or `CGimbalInputBinder` | +| pair one camera with one or more projection entries | `CPlanarProjection` and `IPlanarProjection::CProjection` | +| apply one absolute rigid pose request at runtime | `camera->manipulate({}, &referenceFrame)` | +| set exact position or exact orientation on `Free` and `FPS` | `referenceFrame` built from `camera->getGimbal()` | +| set one absolute typed state that can be reused later | `CCameraGoal` + `CCameraGoalSolver` | +| capture current camera state | `CCameraGoalSolver::capture...` | +| restore a camera from typed state | `CCameraGoalSolver::apply...` | +| save a named camera state | `CCameraPreset` | +| store camera states over time | `CCameraKeyframeTrack` | +| keep playback cursor state | `CCameraPlaybackTimeline` | +| make a camera follow a moving target | `CCameraFollowUtilities` | +| author compact scripted camera sequences | `CCameraSequenceScript` | +| execute frame-by-frame scripted payloads | `CCameraScriptedRuntime` | +| use the path-rig camera | `CPathCamera` and `SCameraPathModel` | + +## Quick start + +This section shows the common entry points before any deeper explanation. + +### 1. Live runtime camera control + +Use this when keyboard, mouse, or ImGuizmo should move the camera right now. 
+ +```cpp +auto camera = core::make_smart_refctd_ptr(eye, target); + +ui::CGimbalInputBinder binder; +ui::CCameraInputBindingUtilities::applyDefaultCameraInputBindingPreset(binder, *camera); + +auto collected = binder.collectVirtualEvents(timestamp, { + .keyboardEvents = { keyEvents.data(), keyEvents.size() }, + .mouseEvents = { mouseEvents.data(), mouseEvents.size() }, + // .imguizmoEvents = { gizmoDeltaTransforms.data(), gizmoDeltaTransforms.size() }, +}); + +camera->manipulate(collected.events); +``` + +The update payload currently accepts: + +- `keyboardEvents` +- `mouseEvents` +- `imguizmoEvents` + +What happens here: + +1. device input is converted into semantic camera commands +2. the camera consumes those commands through `manipulate(...)` +3. the camera updates its gimbal pose + +The controller-side stack is: + +- `IGimbalBindingLayout` for the static mapping from device inputs to virtual events +- `IGimbalInputProcessor` for converting one frame of raw input into event magnitudes +- `CGimbalInputBinder` for the common runtime object that owns a layout and collects one frame of events +- `CCameraInputBindingUtilities` for shared preset layouts such as default `FPS`, `Orbit`, or `Path Rig` bindings + +The two common ways to start are: + +- apply one shared preset for a camera family +- write one binding layout explicitly + +**Question: How do I bind `FPS` to `WASD`?** + +Use the shared default binding preset for the active camera kind. + +```cpp +auto camera = core::make_smart_refctd_ptr(position, orientation); + +ui::CGimbalInputBinder binder; +ui::CCameraInputBindingUtilities::applyDefaultCameraInputBindingPreset(binder, *camera); +``` + +For `FPS`, the default preset gives you: + +- keyboard `W/S/A/D` -> forward, backward, left, right +- keyboard `I/K/J/L` -> tilt up, tilt down, pan left, pan right +- mouse relative movement -> look yaw and pitch + +For `Free`, the default preset adds `Q/E` for roll. 
+ +For target-relative families and `Path Rig`, the default preset keeps the same physical inputs but maps them to the legal state space of that family. + +**Question: How do I define custom bindings?** + +Use one `IGimbalBindingLayout` implementation such as `CGimbalInputBinder` and write the mapping you want. + +```cpp +ui::CGimbalInputBinder binder; +const double customMoveGain = /* choose a sensitivity for this binding */; + +binder.updateKeyboardMapping([customMoveGain](auto& map) +{ + map.clear(); + map.emplace(ui::E_KEY_CODE::EKC_W, ui::IGimbalBindingLayout::CHashInfo(core::CVirtualGimbalEvent::MoveForward, customMoveGain)); + map.emplace(ui::E_KEY_CODE::EKC_S, ui::IGimbalBindingLayout::CHashInfo(core::CVirtualGimbalEvent::MoveBackward, customMoveGain)); + map.emplace(ui::E_KEY_CODE::EKC_A, ui::IGimbalBindingLayout::CHashInfo(core::CVirtualGimbalEvent::MoveLeft, customMoveGain)); + map.emplace(ui::E_KEY_CODE::EKC_D, ui::IGimbalBindingLayout::CHashInfo(core::CVirtualGimbalEvent::MoveRight, customMoveGain)); +}); +``` + +The same pattern works for: + +- mouse bindings through `updateMouseMapping(...)` +- ImGuizmo bindings through `updateImguizmoMapping(...)` + +**Question: How are `magnitude` values generated?** + +`CVirtualGimbalEvent::magnitude` is one non-negative scalar attached to one semantic command. + +It is not a raw device unit and it is not, by itself, the final world-space or angular motion applied by a camera. + +What stays stable at the API level is the meaning by event family: + +- translation events carry one controller-side translation amount +- rotation events carry one controller-side angular amount +- scale events carry one controller-side scale amount + +The binding layer maps raw producer values onto those amounts. 
Different sources may start from: + +- elapsed time for held input +- cursor deltas for relative mouse input +- scroll steps for wheel input +- world-space translation or angular deltas for gizmo-driven input + +That means exact numeric gains are binding policy, not API contract. The binding layer owns sensitivity and repeat-rate tuning. + +After the controller side emits virtual magnitudes, the camera runtime applies its own motion scales and legalizes the result to the concrete camera family. + +The motion pipeline is therefore: + +1. raw device input +2. binding-local gain +3. `CVirtualGimbalEvent { type, magnitude }` +4. camera-local motion scale +5. family-specific legalization and state update + +### 2. Projection is separate from camera state + +**Question: Where is `setProjectionMatrix(...)`?** + +There is no `camera->setProjectionMatrix(...)`. + +That is intentional. + +The camera API keeps runtime camera state and projection state separate: + +- `ICamera` owns pose and motion state +- `IProjection` and its derived types own projection state +- one projection wrapper references one camera when it needs `view`, `MV`, or `MVP` + +This keeps the pairing flexible: + +- one camera can be reused with different projection entries +- one viewport can switch projection preset without replacing the camera +- projection parameters such as FOV, orthographic width, near, and far do not have to live inside every camera kind + +The split looks like this: + +```cpp +auto camera = core::make_smart_refctd_ptr(eye, target); + +using projections_t = std::vector; +auto planar = core::CPlanarProjection::create(core::smart_refctd_ptr(camera)); + +planar->getPlanarProjections().push_back( + core::IPlanarProjection::CProjection::create< + core::IPlanarProjection::CProjection::Perspective>(0.1f, 100.0f, 60.0f)); + +planar->getPlanarProjections().push_back( + core::IPlanarProjection::CProjection::create< + core::IPlanarProjection::CProjection::Orthographic>(0.1f, 100.0f, 10.0f)); + 
+auto& projection = planar->getPlanarProjections()[0]; +projection.update(leftHanded, aspectRatio); + +const auto& view = camera->getGimbal().getViewMatrix(); +const auto& proj = projection.getProjectionMatrix(); +``` + +So the camera does not own projection parameters. + +Instead: + +- the camera owns `view` +- the projection entry owns `projection` +- the wrapper combines both when code needs `MV`, `MVP`, or viewport-local binding state + +When you want to change projection state, touch the projection layer: + +- `IPlanarProjection::CProjection::setPerspective(...)` +- `IPlanarProjection::CProjection::setOrthographic(...)` +- `IPlanarProjection::CProjection::update(...)` + +When you want to change pose or camera-family state, touch the camera layer: + +- `ICamera::manipulate(...)` +- `referenceFrame` +- `CCameraGoal` +- family-specific typed hooks such as `trySetSphericalTarget(...)` or `trySetPathState(...)` + +### 3. Apply one absolute rigid pose request + +Use this when you already have one rigid transform and want the camera to consume it through the normal runtime entry point. + +```cpp +const auto referenceFrame = + hlsl::CCameraMathUtilities::composeTransformMatrix(desiredPosition, desiredOrientation); + +if (camera->manipulate({}, &referenceFrame)) +{ + // reference frame was accepted and applied +} +``` + +`manipulate(...)` can return `false`. + +Common reasons are: + +- there were no virtual events and no `referenceFrame` +- the supplied `referenceFrame` was not a valid rigid orthonormal transform +- the concrete camera kind could not legalize the request into its own runtime state + +**Question: Why not just expose `setPosition(...)` and `setOrientation(...)` everywhere?** + +Because not every camera kind stores arbitrary rigid pose as its native state. + +`Free` can represent arbitrary position and orientation directly. + +`FPS` cannot. 
Its legal runtime state is: + +- world-space position +- yaw +- pitch +- upright orientation reconstructed from yaw and pitch + +Consider this `FPS` example: + +```cpp +const auto desiredPosition = hlsl::float64_t3(2.0, 1.0, -3.0); +const auto desiredOrientation = + hlsl::CCameraMathUtilities::makeQuaternionFromEulerDegreesYXZ( + hlsl::float64_t3(-15.0, 40.0, 25.0)); +``` + +The requested rigid pose contains `roll = 25 deg`. + +That roll is not legal for `FPS`. + +If the API exposed unrestricted `setOrientation(...)` and accepted that quaternion as-is, the runtime camera would no longer match the rules of the `FPS` rig. + +The current API does this instead: + +1. accept one rigid pose request through `referenceFrame` +2. project that pose onto the legal state space of the concrete camera kind +3. rebuild the final runtime pose from that legal state + +For `FPS` that means: + +- keep the requested position +- read forward direction from the rigid reference +- rebuild legal `pitch/yaw` +- reject arbitrary roll +- write back one upright `FPS` pose + +For `FPS`, that rejection applies to `referenceFrame`, not to stray roll virtual events. `CFPSCamera` advertises only translation plus pitch/yaw runtime control, so `RollLeft` and `RollRight` events are ignored by the `FPS` accumulator instead of causing failure. + +The same pattern applies to every camera family: + +- `Free` keeps the rigid pose directly +- `FPS` legalizes to upright `position + pitch/yaw` +- target-relative cameras legalize to `target + orbitUv + distance` +- `Path Rig` legalizes to `PathState` + +Use this path for: + +- one-shot runtime pose application +- ImGuizmo +- world-space or local-space pose anchoring + +### 4. Set exact position or exact orientation on `Free` and `FPS` + +Use this when the target camera is `Free` or `FPS` and you want to replace only one rigid-pose component. 
+ +```cpp +const auto& gimbal = camera->getGimbal(); + +const auto newPosition = desiredPosition; +const auto keepOrientation = gimbal.getOrientation(); + +const auto referenceFrame = + hlsl::CCameraMathUtilities::composeTransformMatrix(newPosition, keepOrientation); + +camera->manipulate({}, &referenceFrame); +``` + +```cpp +const auto& gimbal = camera->getGimbal(); + +const auto keepPosition = gimbal.getPosition(); +const auto newOrientation = desiredOrientation; + +const auto referenceFrame = + hlsl::CCameraMathUtilities::composeTransformMatrix(keepPosition, newOrientation); + +camera->manipulate({}, &referenceFrame); +``` + +`Free` applies these requests exactly. + +`FPS` keeps the exact position but legalizes orientation to its upright `pitch/yaw` state. + +For constrained target-relative and path cameras, prefer family-specific typed state or `CCameraGoal` instead of describing this as an exact component setter. + +### 5. Set one absolute typed state + +Use this when the state should survive beyond one frame or should be reused by presets, follow, playback, persistence, or scripts. + +```cpp +core::CCameraGoal goal = {}; +goal.position = desiredPosition; +goal.orientation = desiredOrientation; + +core::CCameraGoalSolver solver; +auto apply = solver.applyDetailed(camera.get(), goal); + +if (apply.succeeded() && apply.changed()) +{ + // camera was updated during applyDetailed(...) +} +``` + +`applyDetailed(...)` does not build a deferred command object. +It immediately tries to apply `goal` to the runtime camera. + +The returned `apply` value is a report describing what happened: + +- whether the apply succeeded +- whether the camera actually changed +- whether the result was exact or approximate +- whether typed state was applied directly, virtual events were replayed, or both + +So the control flow is: + +1. build one `CCameraGoal` +2. call `applyDetailed(...)` +3. the solver immediately updates the camera if it can +4. 
inspect `apply` if you need status, exactness, or diagnostics + +Use `applyDetailed(...)` when you want that report. +Use `apply(...)` when you only need a plain success/failure boolean. + +**Question: How is this different from `composeTransformMatrix(...)` plus `camera->manipulate({}, &referenceFrame)`?** + +`referenceFrame` carries one rigid pose request: + +- position +- orientation + +That is enough when the job is "try to place the runtime camera at this pose now". + +`CCameraGoal` can carry more than one rigid pose: + +- pose +- target-relative state +- path state +- dynamic perspective state +- source metadata used by tooling + +So the two paths are different: + +- `referenceFrame` asks the runtime camera to legalize one rigid pose request +- `CCameraGoal` asks the solver to apply one typed camera state, using direct typed hooks when available and virtual-event replay when needed + +For `Free` and often `FPS`, both paths may end up close to the same result. + +For constrained families they are not equivalent, because one rigid pose does not fully describe family-specific state such as: + +- target position +- orbit angles plus distance +- `PathState` +- dynamic perspective parameters + +Rule of thumb: + +- use `referenceFrame` for one runtime rigid pose request now +- use `CCameraGoal` for one typed camera state that should be stored, compared, serialized, replayed, or applied later + +### 6. Set one absolute camera-family state + +Use this when you do not want a generic rigid pose and instead want to write the native state of one camera family. 
+ +Target-relative cameras: + +```cpp +camera->trySetSphericalTarget(targetPosition); +camera->trySetSphericalDistance(distance); +``` + +Path camera: + +```cpp +core::ICamera::PathState path = { + .s = desiredS, + .u = desiredU, + .v = desiredV, + .roll = desiredRoll +}; + +camera->trySetPathState(path); +``` + +Use this path when you already have: + +- target-relative state +- path-rig state +- one other family-specific typed fragment exposed by `ICamera` + +### 7. Capture a camera and restore it later + +Use this when you want explicit camera state instead of one-frame runtime input. + +```cpp +core::CCameraGoalSolver solver; + +auto capture = solver.captureDetailed(camera.get()); +if (capture.canUseGoal()) +{ + auto apply = solver.applyDetailed(camera.get(), capture.goal); +} +``` + +What happens here: + +1. the solver reads runtime camera state +2. the solver writes that state into one `CCameraGoal` +3. the solver later applies that goal back to a camera + +### 8. Save a named camera state + +Use this when one camera state needs a user-facing name or identifier. + +```cpp +core::CCameraGoalSolver solver; + +auto capture = solver.captureDetailed(camera.get()); +if (capture.canUseGoal()) +{ + core::CCameraPreset preset; + preset.name = "Overview"; + preset.identifier = "overview"; + core::CCameraPresetUtilities::assignGoalToPreset(preset, capture.goal); +} +``` + +### 9. Make a camera follow a moving target + +Use this when one tracked subject should drive camera behavior. + +```cpp +core::CTrackedTarget trackedTarget(position, orientation); + +core::SCameraFollowConfig follow = {}; +follow.enabled = true; +follow.mode = core::ECameraFollowMode::LookAtTarget; + +core::CCameraGoalSolver solver; +core::CCameraFollowUtilities::applyFollowToCamera(solver, camera.get(), trackedTarget, follow); +``` + +### 10. 
Build and evaluate scripted runtime payloads + +Use this when camera playback is authored as compact camera-domain data and then evaluated through generic per-frame runtime payloads and checks. + +```cpp +core::CCameraScriptedTimeline timeline; +core::CCameraScriptedRuntimeUtilities::finalizeScriptedTimeline(timeline); +``` + +## Core concepts + +### `CVirtualGimbalEvent` + +Defined in [`CVirtualGimbalEvent.hpp`](CVirtualGimbalEvent.hpp). + +`CVirtualGimbalEvent` is one semantic camera command plus one scalar magnitude. + +The scalar magnitude is a controller-side virtual amount emitted after binding +gains are applied. It is not a raw device delta and it is not, by itself, the +final world-space motion applied by a camera. + +Examples: + +- `MoveForward` +- `MoveLeft` +- `MoveUp` +- `TiltUp` +- `PanRight` +- `RollLeft` +- `ScaleZInc` + +The event does not store device-specific origin. +The same event type can come from keyboard input, mouse input, ImGuizmo, scripted playback, or replay helpers. + +### `IGimbal` + +Defined in [`IGimbal.hpp`](IGimbal.hpp) and used by [`ICamera.hpp`](ICamera.hpp). + +The gimbal stores runtime pose: + +- position +- orientation +- scale +- orthonormal basis + +It also accumulates one frame of semantic events into a `VirtualImpulse`. + +`ICamera::CGimbal` extends the base gimbal with a cached world-to-view matrix. + +Every runtime camera owns one `CGimbal`. + +### `ICamera` + +Defined in [`ICamera.hpp`](ICamera.hpp). + +`ICamera` is the shared runtime interface implemented by every camera kind. 
+ +Its main job is: + +- consume one frame of semantic virtual events +- optionally consume one rigid reference frame +- update internal camera state +- update runtime pose in the gimbal + +Important members: + +- `manipulate(...)` +- `getGimbal()` +- `getAllowedVirtualEvents()` +- `getKind()` +- `getCapabilities()` +- typed hooks such as `tryGetSphericalTargetState(...)` and `tryGetPathState(...)` + +Each camera also stores one local motion-scale bundle in `SMotionConfig`. +Those scales are applied after the binding layer emits virtual magnitudes. + +### `referenceFrame` + +Defined by [`ICamera.hpp`](ICamera.hpp) and [`IGimbal.hpp`](IGimbal.hpp). + +`referenceFrame` is the optional rigid transform passed to `ICamera::manipulate(...)`. + +It is the runtime pose anchor for one manipulation step. + +Typical producers: + +- ImGuizmo +- restore helpers +- replay helpers +- code that wants world-space or local-space manipulation anchored to a specific rigid transform + +See Quick start sections 1 to 4 for the concrete runtime usage patterns. + +Shared runtime pattern: + +```text +referenceFrame + -> extract rigid reference transform + -> resolve legal state for this camera kind + -> accumulate virtual events + -> apply deltas in that state space + -> rebuild pose +``` + +### `SCameraRigPose` + +Defined in [`SCameraRigPose.hpp`](SCameraRigPose.hpp). + +`SCameraRigPose` stores only: + +- world-space position +- world-space orientation + +It is the smallest typed pose object reused across the stack. + +### `CCameraGoal` + +Defined in [`CCameraGoal.hpp`](CCameraGoal.hpp). + +`CCameraGoal` is the canonical typed transport for camera state. 
+ +You can think of it as: + +> one explicit camera-state snapshot used by higher-level tools + +It may contain: + +- pose +- target position +- target-relative distance +- orbit state +- path state +- dynamic perspective state +- source camera metadata + +It is used by: + +- capture +- restore +- preset flow +- playback +- follow +- scripted checks + +It is not: + +- a live input object +- a replacement for `manipulate(...)` +- a promise that every camera can represent every arbitrary pose exactly + +For constrained cameras, the solver may project the goal onto legal camera-family state before or during apply. + +### `CCameraGoalSolver` + +Defined in [`CCameraGoalSolver.hpp`](CCameraGoalSolver.hpp). + +`CCameraGoalSolver` converts between typed camera state and runtime cameras. + +It captures runtime cameras into `CCameraGoal`, analyzes whether a target camera can represent that goal directly, and applies the result either through typed state or through runtime replay when needed. + +If you want to restore one absolute camera state and you are not sure which family-specific hook to call, use `CCameraGoalSolver`. + +### `CCameraPreset` + +Defined in [`CCameraPreset.hpp`](CCameraPreset.hpp). + +`CCameraPreset` is a named saved `CCameraGoal`. + +It contains: + +- `name` +- `identifier` +- `goal` + +### `CCameraKeyframeTrack` + +Defined in [`CCameraKeyframeTrack.hpp`](CCameraKeyframeTrack.hpp). + +`CCameraKeyframeTrack` is a sequence of time-stamped presets. + +Each keyframe contains: + +- one preset +- one authored time + +### `CCameraPlaybackTimeline` + +Defined in [`CCameraPlaybackTimeline.hpp`](CCameraPlaybackTimeline.hpp). + +`CCameraPlaybackTimeline` stores playback cursor state over time-based camera data. + +It tracks things such as: + +- current time +- direction +- looping +- paused or playing state + +### `CTrackedTarget` + +Defined in [`CCameraFollowUtilities.hpp`](CCameraFollowUtilities.hpp). 
+ +`CTrackedTarget` is the reusable tracked subject used by follow. + +It owns its own gimbal. +It is not a mesh id and not a scene-node handle. + +### `CCameraSequenceScript` + +Defined in [`CCameraSequenceScript.hpp`](CCameraSequenceScript.hpp). + +`CCameraSequenceScript` is the compact authored format for camera sequences. + +It stores camera-domain data such as: + +- targeted camera +- projection presentation requests +- camera keyframes +- tracked-target keyframes +- continuity settings +- capture fractions + +It does not store frame-by-frame low-level input. + +### `CCameraScriptedRuntime` + +Defined in [`CCameraScriptedRuntime.hpp`](CCameraScriptedRuntime.hpp). + +`CCameraScriptedRuntime` is the expanded executable form used during scripted playback and validation. + +It stores runtime payloads such as: + +- low-level input events +- goal and tracked-target events +- per-frame checks +- capture scheduling + +Consumer-specific UI actions stay outside this shared runtime payload. + +### `Path Rig` + +Defined by [`CPathCamera.hpp`](CPathCamera.hpp), [`CCameraPathUtilities.hpp`](CCameraPathUtilities.hpp), and [`CCameraPathMetadata.hpp`](CCameraPathMetadata.hpp). + +`Path Rig` is the camera family with typed state: + +- `s` +- `u` +- `v` +- `roll` + +Its runtime and typed tooling are driven by `SCameraPathModel`, which defines how path state is resolved, updated, and converted back into camera pose. + +At the API boundary, you can think of `Path Rig` as one parametric camera map that turns typed path state into pose. + +In other words, the reusable seam is not "one built-in rail camera". 
It is: + +$$ +f : (t, q, L) \mapsto (p, o) +$$ + +with: + +- $t \in \mathbb{R}^3$: + world-space target or anchor position used by the model +- $q \in \mathcal{Q}$: + typed path state +- $L \in \mathcal{L}$: + path-state limits +- $p \in \mathbb{R}^3$: + evaluated world-space camera position +- $o \in \mathrm{SO}(3)$: + evaluated camera orientation + +In the shared built-in state representation, + +$$ +\mathcal{Q} = S^1 \times \mathbb{R} \times \mathbb{R} \times S^1 +$$ + +with + +$$ +q = (s, u, v, \rho), +$$ + +where: + +- $s \in S^1$ is one wrapped angular parameter +- $u \in \mathbb{R}$ is one lateral or radial parameter +- $v \in \mathbb{R}$ is one second shape or height parameter +- $\rho \in S^1$ is authored roll around the model forward axis + +and the limit bundle is + +$$ +\mathcal{L} = \{(u_{\min}, d_{\min}, d_{\max})\}, +$$ + +where: + +- $u_{\min} \in \mathbb{R}_{\ge 0}$ is the minimal legal `u` +- $d_{\min} \in \mathbb{R}_{\ge 0}$ is the minimal legal radial distance +- $d_{\max} \in \mathbb{R}_{\ge 0} \cup \{\infty\}$ is the maximal legal radial distance + +The important part is that one caller-provided model decides how typed state becomes final camera pose. + +This is why the same API seam can model many constrained motions: circles, cylinders, orbits, guide curves, splines with offsets, crane-style rigs, banking on-rails cameras, and other custom parametric camera laws. + +If you already have one path curve + +$$ +C(s) \in \mathbb{R}^3 +$$ + +and one moving local frame + +$$ +R(s), U(s), F(s) \in \mathbb{R}^3, +$$ + +then one representative evaluator has the shape + +$$ +p(s,u,v) = C(s) + u\,R(s) + v\,U(s), +$$ + +with orientation built from the basis + +$$ +(R(s), U(s), F(s)) +$$ + +and then rotated by authored roll $\rho$ around the current forward axis. + +The built-in model below is just one concrete default implementation of that seam. 
+It happens to have one simple closed-form `resolveState(...)` from world-space position, but custom models only need to provide a legal state-resolution callback. They do not need one strict analytical inverse of the evaluator. + +**Default built-in model** + +If you do not supply your own `SCameraPathModel`, `CPathCamera` uses the built-in cylindrical model. + +That default model uses a cylindrical parameterization around the current target position +$t = (t_x, t_y, t_z)$ with typed state +$q = (s, u, v, \rho)$: + +$$ +\begin{aligned} +x &= t_x + u \cos s \\ +y &= t_y + v \\ +z &= t_z + u \sin s +\end{aligned} +$$ + +That means: + +- `s` is the authored angle around the target in the world `XZ` plane +- `u` is the planar `XZ` radius +- `v` is the vertical offset on world `Y` +- `roll` is an extra rotation applied around the resulting forward axis + +For the built-in model, `resolveState(...)` from one world-space position can be written as: + +$$ +\begin{aligned} +\Delta &= p - t \\ +s &= \mathrm{wrap}(\mathrm{atan2}(\Delta_z, \Delta_x)) \\ +u &= \max(u_{\min}, \sqrt{\Delta_x^2 + \Delta_z^2}) \\ +v &= \Delta_y +\end{aligned} +$$ + +The default model also derives one radial camera distance from `(u, v)`: + +$$ +d = \sqrt{u^2 + v^2} +$$ + +and sanitizes state as: + +$$ +\begin{aligned} +s &\leftarrow \mathrm{wrap}(s) \\ +u &\leftarrow \max(u_{\min}, u) \\ +\rho &\leftarrow \mathrm{wrap}(\rho) +\end{aligned} +$$ + +The base orientation is then built from the camera looking from the resolved position back at the target, and the authored roll is applied around that resulting forward axis. 
+ +The built-in control law maps runtime local motion into path-state delta as: + +$$ +\Delta q = (\Delta s, \Delta u, \Delta v, \Delta \rho)^{\mathsf{T}} += (\Delta z_{\mathrm{local}}, \Delta x_{\mathrm{local}}, \Delta y_{\mathrm{local}}, \Delta \mathrm{roll})^{\mathsf{T}} +$$ + +and integrates it as: + +$$ +q_{n+1} = \mathrm{sanitize}(q_n + \Delta q) +$$ + +Equivalent pseudocode for the built-in model is: + +```cpp +PathState state = sanitize(inputState, limits); + +const double appliedU = max(limits.minU, state.u); +const dvec3 offset = { + cos(state.s) * appliedU, + state.v, + sin(state.s) * appliedU +}; + +const dvec3 requestedPosition = target + offset; +const auto [orbitUv, distance] = + buildOrbitFromPosition(target, requestedPosition, limits.minDistance, limits.maxDistance); + +auto [position, orientation] = + buildSphericalPoseFromOrbit(target, orbitUv, distance, limits.minDistance, limits.maxDistance); + +if (state.roll != 0.0) + orientation = applyRollAroundCurrentForward(orientation, state.roll); + +PathDelta delta = { + .s = localTranslation.z, + .u = localTranslation.x, + .v = localTranslation.y, + .roll = localRotation.z +}; + +state = sanitize(state + delta, limits); +``` + +This is intentionally more general than one hardcoded "rail camera". + +The built-in model shown above is only one concrete parameterization. 
+The reusable part is `SCameraPathModel`, which lets the runtime reinterpret the same typed `PathState` seam through custom: + +- state resolution +- control law +- integration +- pose evaluation +- distance update + +In practice that means the same `Path Rig` family can be used for many constrained camera designs, for example: + +- cylindrical and orbital rigs around one subject +- dolly or crane-style motion with authored lateral and vertical offsets +- cameras constrained to one spline or guide path with side/up offsets +- banked path cameras where `roll` becomes authored banking around the current forward axis +- on-rails gameplay or cinematic cameras with one path parameter plus local offsets +- custom path-following rigs that keep the runtime API and typed tooling unchanged while replacing only the path model + +So the important boundary is: + +- the built-in model is one cylindrical target-relative parameterization +- the `Path Rig` API surface is the extensible typed seam for path-driven camera families + +It can represent a large class of practical constrained camera motions, but it is still not "arbitrary free pose". +If a camera must store completely unconstrained 6DOF pose as its native state, use `Free`. + +## Camera families + +- `Free` cameras in [`CFPSCamera.hpp`](CFPSCamera.hpp) and [`CFreeLockCamera.hpp`](CFreeLockCamera.hpp) store world-space position plus free or FPS-constrained orientation. +- Target-relative cameras are built on [`CSphericalTargetCamera.hpp`](CSphericalTargetCamera.hpp) and include [`COrbitCamera.hpp`](COrbitCamera.hpp), [`CArcballCamera.hpp`](CArcballCamera.hpp), [`CTurntableCamera.hpp`](CTurntableCamera.hpp), [`CTopDownCamera.hpp`](CTopDownCamera.hpp), [`CIsometricCamera.hpp`](CIsometricCamera.hpp), [`CChaseCamera.hpp`](CChaseCamera.hpp), [`CDollyCamera.hpp`](CDollyCamera.hpp), and [`CDollyZoomCamera.hpp`](CDollyZoomCamera.hpp). They store target position, `orbitUv`, and distance instead of arbitrary free pose. 
+- [`CDollyZoomCamera.hpp`](CDollyZoomCamera.hpp) extends the target-relative family with dynamic perspective state `baseFov` and `referenceDistance`. +- [`CPathCamera.hpp`](CPathCamera.hpp) uses the parametric path-state seam described above together with limits `minU`, `minDistance`, and `maxDistance`. + +## Typed tooling + +Use the typed layer when camera state must outlive the current frame or be exchanged between tools: + +1. `SCameraRigPose` is the smallest typed pose fragment. +2. `CCameraGoal` is the canonical typed transport for camera state. +3. `CCameraGoalSolver` captures runtime cameras into goals and applies goals back to runtime cameras. +4. `CCameraPreset` gives one goal a stable user-facing identity. +5. `CCameraKeyframeTrack` stores presets over authored time. +6. `CCameraPlaybackTimeline` stores playback cursor state while a track is being evaluated. + +## Follow + +Follow lives in [`CCameraFollowUtilities.hpp`](CCameraFollowUtilities.hpp) and [`CCameraFollowRegressionUtilities.hpp`](CCameraFollowRegressionUtilities.hpp). It combines one `CTrackedTarget`, one follow mode, and one follow configuration, then builds the resulting camera goal and applies it through the shared goal solver. Available modes are `OrbitTarget`, `LookAtTarget`, `KeepWorldOffset`, and `KeepLocalOffset`. + +## Scripting + +### Compact authored format + +[`CCameraSequenceScript.hpp`](CCameraSequenceScript.hpp) and [`CCameraSequenceScriptPersistence.hpp`](CCameraSequenceScriptPersistence.hpp) store authored camera-domain data. + +### Expanded runtime format + +[`CCameraScriptedRuntime.hpp`](CCameraScriptedRuntime.hpp) and [`CCameraScriptedCheckRunner.hpp`](CCameraScriptedCheckRunner.hpp) store executable per-frame runtime payloads and validation checks. 
+ +Common flow: + +```text +compact authored sequence + -> compile or expand + -> scripted runtime payload + -> execute against runtime camera state +``` + +## Projection and presentation helpers + +Projection types live in [`IProjection.hpp`](IProjection.hpp), [`ILinearProjection.hpp`](ILinearProjection.hpp), [`IPerspectiveProjection.hpp`](IPerspectiveProjection.hpp), [`IPlanarProjection.hpp`](IPlanarProjection.hpp), [`CLinearProjection.hpp`](CLinearProjection.hpp), [`CPlanarProjection.hpp`](CPlanarProjection.hpp), and [`CCubeProjection.hpp`](CCubeProjection.hpp). + +Camera-facing presentation helpers live in [`CCameraPresentationUtilities.hpp`](CCameraPresentationUtilities.hpp), [`CCameraProjectionUtilities.hpp`](CCameraProjectionUtilities.hpp), [`CCameraTextUtilities.hpp`](CCameraTextUtilities.hpp), [`CCameraViewportOverlayUtilities.hpp`](CCameraViewportOverlayUtilities.hpp), [`CCameraControlPanelUiUtilities.hpp`](CCameraControlPanelUiUtilities.hpp), and [`CCameraScriptVisualDebugOverlayUtilities.hpp`](CCameraScriptVisualDebugOverlayUtilities.hpp). diff --git a/include/nbl/ext/Cameras/SCameraRigPose.hpp b/include/nbl/ext/Cameras/SCameraRigPose.hpp new file mode 100644 index 0000000000..f8f3d7dfed --- /dev/null +++ b/include/nbl/ext/Cameras/SCameraRigPose.hpp @@ -0,0 +1,26 @@ +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _S_CAMERA_RIG_POSE_HPP_ +#define _S_CAMERA_RIG_POSE_HPP_ + +#include "CCameraMathUtilities.hpp" + +namespace nbl::core +{ + +/// @brief Canonical camera pose consisting of world-space position and orientation. +/// +/// This type stores only pose data. Higher-level types add target-relative, +/// dynamic-perspective, or path-specific state around it. +struct SCameraRigPose +{ + /// @brief Camera origin in world space. + hlsl::float64_t3 position = hlsl::float64_t3(0.0); + /// @brief Camera orientation in world space expressed as a unit quaternion. 
+	hlsl::camera_quaternion_t orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion();
+};
+
+} // namespace nbl::core
+
+#endif // _S_CAMERA_RIG_POSE_HPP_
diff --git a/include/nbl/ui/KeyCodes.h b/include/nbl/ui/KeyCodes.h
index b6d05aed36..40f6726512 100644
--- a/include/nbl/ui/KeyCodes.h
+++ b/include/nbl/ui/KeyCodes.h
@@ -1,6 +1,9 @@
 #ifndef _NBL_UI_KEYCODES_H_INCLUDED_
 #define _NBL_UI_KEYCODES_H_INCLUDED_
 
+#include <array>
+#include <string_view>
+
 namespace nbl::ui
 {
 
@@ -276,5 +279,205 @@ enum E_MOUSE_BUTTON : uint8_t
 	EMB_COUNT,
 };
 
+// Unambiguous set of "codes" to represent various mouse actions we support with Nabla - equivalent of E_KEY_CODE
+enum E_MOUSE_CODE : uint8_t
+{
+	EMC_NONE = 0,
+
+	// I know its E_MOUSE_BUTTON, this enum *must* be more abstract to standardize mouse
+	EMC_LEFT_BUTTON,
+	EMC_RIGHT_BUTTON,
+	EMC_MIDDLE_BUTTON,
+	EMC_BUTTON_4,
+	EMC_BUTTON_5,
+
+	// and this is kinda SMouseEvent::E_EVENT_TYPE::EET_SCROLL
+	EMC_VERTICAL_POSITIVE_SCROLL,
+	EMC_VERTICAL_NEGATIVE_SCROLL,
+	EMC_HORIZONTAL_POSITIVE_SCROLL,
+	EMC_HORIZONTAL_NEGATIVE_SCROLL,
+
+	// SMouseEvent::E_EVENT_TYPE::EET_MOVEMENT
+	EMC_RELATIVE_POSITIVE_MOVEMENT_X,
+	EMC_RELATIVE_POSITIVE_MOVEMENT_Y,
+	EMC_RELATIVE_NEGATIVE_MOVEMENT_X,
+	EMC_RELATIVE_NEGATIVE_MOVEMENT_Y,
+
+	EMC_COUNT,
+};
+
+namespace impl
+{
+
+template<typename Code>
+struct SNamedCode final
+{
+	std::string_view name;
+	Code code;
+};
+
+template<typename Code, size_t N>
+constexpr Code lookupNamedCode(std::string_view str, const std::array<SNamedCode<Code>, N>& table, const Code fallback)
+{
+	for (const auto& entry : table)
+	{
+		if (str == entry.name)
+			return entry.code;
+	}
+
+	return fallback;
+}
+
+constexpr char asciiToUpper(const char c)
+{
+	return (c >= 'a' && c <= 'z') ? 
static_cast<char>(c - ('a' - 'A')) : c;
+}
+
+static constexpr auto NamedKeyCodes = std::to_array<SNamedCode<E_KEY_CODE>>({
+	{ "BACKSPACE", E_KEY_CODE::EKC_BACKSPACE },
+	{ "TAB", E_KEY_CODE::EKC_TAB },
+	{ "CLEAR", E_KEY_CODE::EKC_CLEAR },
+	{ "ENTER", E_KEY_CODE::EKC_ENTER },
+	{ "LEFT_SHIFT", E_KEY_CODE::EKC_LEFT_SHIFT },
+	{ "RIGHT_SHIFT", E_KEY_CODE::EKC_RIGHT_SHIFT },
+	{ "LEFT_CONTROL", E_KEY_CODE::EKC_LEFT_CONTROL },
+	{ "RIGHT_CONTROL", E_KEY_CODE::EKC_RIGHT_CONTROL },
+	{ "LEFT_ALT", E_KEY_CODE::EKC_LEFT_ALT },
+	{ "RIGHT_ALT", E_KEY_CODE::EKC_RIGHT_ALT },
+	{ "PAUSE", E_KEY_CODE::EKC_PAUSE },
+	{ "CAPS_LOCK", E_KEY_CODE::EKC_CAPS_LOCK },
+	{ "ESCAPE", E_KEY_CODE::EKC_ESCAPE },
+	{ "SPACE", E_KEY_CODE::EKC_SPACE },
+	{ "PAGE_UP", E_KEY_CODE::EKC_PAGE_UP },
+	{ "PAGE_DOWN", E_KEY_CODE::EKC_PAGE_DOWN },
+	{ "END", E_KEY_CODE::EKC_END },
+	{ "HOME", E_KEY_CODE::EKC_HOME },
+	{ "LEFT_ARROW", E_KEY_CODE::EKC_LEFT_ARROW },
+	{ "RIGHT_ARROW", E_KEY_CODE::EKC_RIGHT_ARROW },
+	{ "DOWN_ARROW", E_KEY_CODE::EKC_DOWN_ARROW },
+	{ "UP_ARROW", E_KEY_CODE::EKC_UP_ARROW },
+	{ "SELECT", E_KEY_CODE::EKC_SELECT },
+	{ "PRINT", E_KEY_CODE::EKC_PRINT },
+	{ "EXECUTE", E_KEY_CODE::EKC_EXECUTE },
+	{ "PRINT_SCREEN", E_KEY_CODE::EKC_PRINT_SCREEN },
+	{ "INSERT", E_KEY_CODE::EKC_INSERT },
+	{ "DELETE", E_KEY_CODE::EKC_DELETE },
+	{ "HELP", E_KEY_CODE::EKC_HELP },
+	{ "LEFT_WIN", E_KEY_CODE::EKC_LEFT_WIN },
+	{ "RIGHT_WIN", E_KEY_CODE::EKC_RIGHT_WIN },
+	{ "APPS", E_KEY_CODE::EKC_APPS },
+	{ "COMMA", E_KEY_CODE::EKC_COMMA },
+	{ "PERIOD", E_KEY_CODE::EKC_PERIOD },
+	{ "SEMICOLON", E_KEY_CODE::EKC_SEMICOLON },
+	{ "OPEN_BRACKET", E_KEY_CODE::EKC_OPEN_BRACKET },
+	{ "CLOSE_BRACKET", E_KEY_CODE::EKC_CLOSE_BRACKET },
+	{ "BACKSLASH", E_KEY_CODE::EKC_BACKSLASH },
+	{ "APOSTROPHE", E_KEY_CODE::EKC_APOSTROPHE },
+	{ "ADD", E_KEY_CODE::EKC_ADD },
+	{ "SUBTRACT", E_KEY_CODE::EKC_SUBTRACT },
+	{ "MULTIPLY", E_KEY_CODE::EKC_MULTIPLY },
+	{ "DIVIDE", E_KEY_CODE::EKC_DIVIDE },
+	{ "F1", E_KEY_CODE::EKC_F1 },
+	{ "F2", 
E_KEY_CODE::EKC_F2 },
+	{ "F3", E_KEY_CODE::EKC_F3 },
+	{ "F4", E_KEY_CODE::EKC_F4 },
+	{ "F5", E_KEY_CODE::EKC_F5 },
+	{ "F6", E_KEY_CODE::EKC_F6 },
+	{ "F7", E_KEY_CODE::EKC_F7 },
+	{ "F8", E_KEY_CODE::EKC_F8 },
+	{ "F9", E_KEY_CODE::EKC_F9 },
+	{ "F10", E_KEY_CODE::EKC_F10 },
+	{ "F11", E_KEY_CODE::EKC_F11 },
+	{ "F12", E_KEY_CODE::EKC_F12 },
+	{ "F13", E_KEY_CODE::EKC_F13 },
+	{ "F14", E_KEY_CODE::EKC_F14 },
+	{ "F15", E_KEY_CODE::EKC_F15 },
+	{ "F16", E_KEY_CODE::EKC_F16 },
+	{ "F17", E_KEY_CODE::EKC_F17 },
+	{ "F18", E_KEY_CODE::EKC_F18 },
+	{ "F19", E_KEY_CODE::EKC_F19 },
+	{ "F20", E_KEY_CODE::EKC_F20 },
+	{ "F21", E_KEY_CODE::EKC_F21 },
+	{ "F22", E_KEY_CODE::EKC_F22 },
+	{ "F23", E_KEY_CODE::EKC_F23 },
+	{ "F24", E_KEY_CODE::EKC_F24 },
+	{ "NUMPAD_0", E_KEY_CODE::EKC_NUMPAD_0 },
+	{ "NUMPAD_1", E_KEY_CODE::EKC_NUMPAD_1 },
+	{ "NUMPAD_2", E_KEY_CODE::EKC_NUMPAD_2 },
+	{ "NUMPAD_3", E_KEY_CODE::EKC_NUMPAD_3 },
+	{ "NUMPAD_4", E_KEY_CODE::EKC_NUMPAD_4 },
+	{ "NUMPAD_5", E_KEY_CODE::EKC_NUMPAD_5 },
+	{ "NUMPAD_6", E_KEY_CODE::EKC_NUMPAD_6 },
+	{ "NUMPAD_7", E_KEY_CODE::EKC_NUMPAD_7 },
+	{ "NUMPAD_8", E_KEY_CODE::EKC_NUMPAD_8 },
+	{ "NUMPAD_9", E_KEY_CODE::EKC_NUMPAD_9 },
+	{ "NUM_LOCK", E_KEY_CODE::EKC_NUM_LOCK },
+	{ "SCROLL_LOCK", E_KEY_CODE::EKC_SCROLL_LOCK },
+	{ "VOLUME_MUTE", E_KEY_CODE::EKC_VOLUME_MUTE },
+	{ "VOLUME_UP", E_KEY_CODE::EKC_VOLUME_UP },
+	{ "VOLUME_DOWN", E_KEY_CODE::EKC_VOLUME_DOWN }
+});
+
+static constexpr auto NamedMouseCodes = std::to_array<SNamedCode<E_MOUSE_CODE>>({
+	{ "LEFT_BUTTON", E_MOUSE_CODE::EMC_LEFT_BUTTON },
+	{ "RIGHT_BUTTON", E_MOUSE_CODE::EMC_RIGHT_BUTTON },
+	{ "MIDDLE_BUTTON", E_MOUSE_CODE::EMC_MIDDLE_BUTTON },
+	{ "BUTTON_4", E_MOUSE_CODE::EMC_BUTTON_4 },
+	{ "BUTTON_5", E_MOUSE_CODE::EMC_BUTTON_5 },
+	{ "VERTICAL_POSITIVE_SCROLL", E_MOUSE_CODE::EMC_VERTICAL_POSITIVE_SCROLL },
+	{ "VERTICAL_NEGATIVE_SCROLL", E_MOUSE_CODE::EMC_VERTICAL_NEGATIVE_SCROLL },
+	{ "HORIZONTAL_POSITIVE_SCROLL", E_MOUSE_CODE::EMC_HORIZONTAL_POSITIVE_SCROLL },
+	{ 
"HORIZONTAL_NEGATIVE_SCROLL", E_MOUSE_CODE::EMC_HORIZONTAL_NEGATIVE_SCROLL }, + { "RELATIVE_POSITIVE_MOVEMENT_X", E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_X }, + { "RELATIVE_POSITIVE_MOVEMENT_Y", E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_Y }, + { "RELATIVE_NEGATIVE_MOVEMENT_X", E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_X }, + { "RELATIVE_NEGATIVE_MOVEMENT_Y", E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y } +}); + +} // namespace impl + +constexpr E_KEY_CODE stringToKeyCode(std::string_view str) +{ + if (str.size() == 1u) + { + const char upper = impl::asciiToUpper(str.front()); + if (upper >= 'A' && upper <= 'Z') + return static_cast(upper); + if (upper >= '0' && upper <= '9') + return static_cast(upper); + } + + return impl::lookupNamedCode(str, impl::NamedKeyCodes, E_KEY_CODE::EKC_NONE); +} + +constexpr std::string_view mouseCodeToString(E_MOUSE_CODE code) +{ + switch (code) + { + case EMC_LEFT_BUTTON: return "LEFT_BUTTON"; + case EMC_RIGHT_BUTTON: return "RIGHT_BUTTON"; + case EMC_MIDDLE_BUTTON: return "MIDDLE_BUTTON"; + case EMC_BUTTON_4: return "BUTTON_4"; + case EMC_BUTTON_5: return "BUTTON_5"; + + case EMC_VERTICAL_POSITIVE_SCROLL: return "VERTICAL_POSITIVE_SCROLL"; + case EMC_VERTICAL_NEGATIVE_SCROLL: return "VERTICAL_NEGATIVE_SCROLL"; + case EMC_HORIZONTAL_POSITIVE_SCROLL: return "HORIZONTAL_POSITIVE_SCROLL"; + case EMC_HORIZONTAL_NEGATIVE_SCROLL: return "HORIZONTAL_NEGATIVE_SCROLL"; + + case EMC_RELATIVE_POSITIVE_MOVEMENT_X: return "RELATIVE_POSITIVE_MOVEMENT_X"; + case EMC_RELATIVE_POSITIVE_MOVEMENT_Y: return "RELATIVE_POSITIVE_MOVEMENT_Y"; + case EMC_RELATIVE_NEGATIVE_MOVEMENT_X: return "RELATIVE_NEGATIVE_MOVEMENT_X"; + case EMC_RELATIVE_NEGATIVE_MOVEMENT_Y: return "RELATIVE_NEGATIVE_MOVEMENT_Y"; + + default: return "NONE"; + } +} + +constexpr E_MOUSE_CODE stringToMouseCode(std::string_view str) +{ + return impl::lookupNamedCode(str, impl::NamedMouseCodes, E_MOUSE_CODE::EMC_NONE); +} + } #endif diff --git a/src/nbl/ext/CMakeLists.txt 
b/src/nbl/ext/CMakeLists.txt index f3b55531c2..43fd1aeb20 100644 --- a/src/nbl/ext/CMakeLists.txt +++ b/src/nbl/ext/CMakeLists.txt @@ -86,6 +86,16 @@ set(NBL_EXT_FULL_SCREEN_TRIANGLE_LIB PARENT_SCOPE ) +add_subdirectory(Cameras) +set(NBL_EXT_CAMERAS_INCLUDE_DIRS + ${NBL_EXT_Cameras_INCLUDE_DIRS} + PARENT_SCOPE +) +set(NBL_EXT_CAMERAS_LIB + ${NBL_EXT_Cameras_LIB} + PARENT_SCOPE +) + propagate_changed_variables_to_parent_scope() NBL_ADJUST_FOLDERS(ext) diff --git a/src/nbl/ext/Cameras/CCameraFollowRegressionUtilities.cpp b/src/nbl/ext/Cameras/CCameraFollowRegressionUtilities.cpp new file mode 100644 index 0000000000..ce7491aed4 --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraFollowRegressionUtilities.cpp @@ -0,0 +1,278 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraFollowRegressionUtilities.hpp" + +namespace nbl::system +{ + +SCameraFollowRegressionThresholds CCameraFollowRegressionUtilities::makeFollowRegressionThresholds( + const float projectedNdcTolerance, + const float lockAngleToleranceDeg) +{ + auto thresholds = SCameraFollowRegressionThresholds{}; + thresholds.projectedNdcTolerance = projectedNdcTolerance; + thresholds.lockAngleToleranceDeg = lockAngleToleranceDeg; + return thresholds; +} + +bool CCameraFollowRegressionUtilities::tryComputeProjectedFollowTargetMetrics( + const SCameraProjectionContext& projectionContext, + const core::CTrackedTarget& trackedTarget, + SCameraProjectedTargetMetrics& outMetrics, + const float clipWEpsilon) +{ + outMetrics = {}; + const hlsl::float32_t3 target = hlsl::CCameraMathUtilities::castVector(trackedTarget.getGimbal().getPosition()); + const auto viewSpace = hlsl::mul(projectionContext.viewMatrix, hlsl::float32_t4(target.x, target.y, target.z, 1.0f)); + const auto clipProjection = hlsl::transpose(projectionContext.projectionMatrix); + const auto 
clip = hlsl::mul(clipProjection, viewSpace); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(clip.x) || !hlsl::CCameraMathUtilities::isFiniteScalar(clip.y) || !hlsl::CCameraMathUtilities::isFiniteScalar(clip.z) || !hlsl::CCameraMathUtilities::isFiniteScalar(clip.w)) + return false; + + const auto absW = hlsl::abs(clip.w); + if (absW < clipWEpsilon) + return false; + + const float invW = 1.0f / clip.w; + outMetrics.ndc = hlsl::float32_t2(clip.x, clip.y) * invW; + if (!hlsl::CCameraMathUtilities::isFiniteScalar(outMetrics.ndc.x) || !hlsl::CCameraMathUtilities::isFiniteScalar(outMetrics.ndc.y)) + return false; + + outMetrics.radius = hlsl::length(outMetrics.ndc); + return true; +} + +bool CCameraFollowRegressionUtilities::validateProjectedFollowTargetContract( + const SCameraProjectionContext& projectionContext, + const core::CTrackedTarget& trackedTarget, + SCameraProjectedTargetMetrics& outMetrics, + std::string* error, + const SCameraFollowRegressionThresholds& thresholds) +{ + if (!tryComputeProjectedFollowTargetMetrics(projectionContext, trackedTarget, outMetrics, thresholds.clipWEpsilon)) + { + if (error) + *error = "failed to project follow target"; + return false; + } + + if (outMetrics.radius > thresholds.projectedNdcTolerance) + { + if (error) + { + *error = "projected target mismatch ndc=(" + std::to_string(outMetrics.ndc.x) + + "," + std::to_string(outMetrics.ndc.y) + ") radius=" + std::to_string(outMetrics.radius); + } + return false; + } + + return true; +} + +SCameraFollowVisualMetrics CCameraFollowRegressionUtilities::buildFollowVisualMetrics( + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, + const core::SCameraFollowConfig* followConfig, + const SCameraProjectionContext* projectionContext) +{ + SCameraFollowVisualMetrics out = {}; + if (!camera || !followConfig || !followConfig->enabled || followConfig->mode == core::ECameraFollowMode::Disabled) + return out; + + out.active = true; + out.mode = followConfig->mode; + + double 
targetDistance = 0.0; + out.lockValid = core::CCameraFollowUtilities::cameraFollowModeLocksViewToTarget(followConfig->mode) && + core::CCameraFollowUtilities::tryComputeFollowTargetLockMetrics(camera->getGimbal(), trackedTarget, out.lockAngleDeg, &targetDistance); + if (out.lockValid) + out.targetDistance = static_cast(targetDistance); + + if (out.lockValid && projectionContext) + out.projectedValid = tryComputeProjectedFollowTargetMetrics(*projectionContext, trackedTarget, out.projectedTarget); + + return out; +} + +bool CCameraFollowRegressionUtilities::validateFollowTargetContract( + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, + const core::SCameraFollowConfig& followConfig, + const core::CCameraGoal& followGoal, + SCameraFollowRegressionResult& out, + std::string* error, + const SCameraProjectionContext* projectionContext, + const SCameraFollowRegressionThresholds& thresholds) +{ + out = {}; + if (!camera) + { + if (error) + *error = "missing camera"; + return false; + } + + if (core::CCameraFollowUtilities::cameraFollowModeLocksViewToTarget(followConfig.mode)) + { + out.hasLockMetrics = core::CCameraFollowUtilities::tryComputeFollowTargetLockMetrics(camera->getGimbal(), trackedTarget, out.lockAngleDeg, &out.targetDistance); + if (!out.hasLockMetrics) + { + if (error) + *error = "failed to compute follow lock metrics"; + return false; + } + + const auto& trackedTargetGimbal = trackedTarget.getGimbal(); + const auto& cameraGimbal = camera->getGimbal(); + const hlsl::float64_t3 trackedTargetPosition = trackedTargetGimbal.getPosition(); + const hlsl::float64_t3 cameraPosition = cameraGimbal.getPosition(); + const double expectedTargetDistance = hlsl::length(trackedTargetPosition - cameraPosition); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(expectedTargetDistance) || hlsl::abs(expectedTargetDistance - out.targetDistance) > thresholds.distanceTolerance) + { + if (error) + { + *error = "target distance mismatch actual=" + 
std::to_string(out.targetDistance) + + " expected=" + std::to_string(expectedTargetDistance); + } + return false; + } + + if (out.lockAngleDeg > thresholds.lockAngleToleranceDeg) + { + if (error) + *error = "lock angle mismatch angle_deg=" + std::to_string(out.lockAngleDeg); + return false; + } + + if (projectionContext) + { + out.hasProjectedMetrics = tryComputeProjectedFollowTargetMetrics( + *projectionContext, + trackedTarget, + out.projectedTarget, + thresholds.clipWEpsilon); + if (!out.hasProjectedMetrics) + { + if (error) + *error = "failed to compute projected follow target metrics"; + return false; + } + + if (out.projectedTarget.radius > thresholds.projectedNdcTolerance) + { + if (error) + { + *error = "projected target mismatch ndc=(" + std::to_string(out.projectedTarget.ndc.x) + + "," + std::to_string(out.projectedTarget.ndc.y) + ") radius=" + std::to_string(out.projectedTarget.radius); + } + return false; + } + } + } + + if (camera->supportsGoalState(core::ICamera::GoalStateSphericalTarget)) + { + core::ICamera::SphericalTargetState state; + if (!camera->tryGetSphericalTargetState(state)) + { + if (error) + *error = "missing spherical target state"; + return false; + } + + out.hasSphericalState = true; + out.sphericalTarget = state.target; + out.sphericalDistance = state.distance; + + const auto& trackedTargetGimbal = trackedTarget.getGimbal(); + const auto& cameraGimbal = camera->getGimbal(); + const hlsl::float64_t3 trackedTargetPosition = trackedTargetGimbal.getPosition(); + const hlsl::float64_t3 targetDelta = state.target - trackedTargetPosition; + const double targetDeltaLen = hlsl::length(targetDelta); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(targetDeltaLen) || targetDeltaLen > thresholds.targetTolerance) + { + if (error) + *error = "spherical target writeback mismatch"; + return false; + } + + const double actualDistance = hlsl::length(cameraGimbal.getPosition() - trackedTargetPosition); + const auto expectedDistance = 
followGoal.hasOrbitState ? static_cast(followGoal.orbitDistance) : + (followGoal.hasDistance ? static_cast(followGoal.distance) : actualDistance); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(actualDistance) || !hlsl::CCameraMathUtilities::isFiniteScalar(expectedDistance) || + hlsl::abs(actualDistance - expectedDistance) > thresholds.distanceTolerance || + hlsl::abs(static_cast(state.distance) - expectedDistance) > thresholds.distanceTolerance) + { + if (error) + { + *error = "spherical distance mismatch actual=" + std::to_string(actualDistance) + + " state=" + std::to_string(state.distance) + + " expected=" + std::to_string(expectedDistance); + } + return false; + } + } + + out.passed = true; + return true; +} + +bool CCameraFollowRegressionUtilities::buildApplyAndValidateFollowTargetContract( + const core::CCameraGoalSolver& solver, + core::ICamera* camera, + const core::CTrackedTarget& trackedTarget, + const core::SCameraFollowConfig& followConfig, + SCameraFollowApplyValidationResult& out, + std::string* error, + const SCameraProjectionContext* projectionContext, + const SCameraFollowRegressionThresholds& thresholds) +{ + out = {}; + + if (!core::CCameraFollowUtilities::tryBuildFollowGoal(solver, camera, trackedTarget, followConfig, out.goal)) + { + if (error) + *error = "failed to build follow goal"; + return false; + } + out.hasGoal = true; + + out.applyResult = core::CCameraFollowUtilities::applyFollowToCamera(solver, camera, trackedTarget, followConfig); + if (!out.applyResult.succeeded()) + { + if (error) + *error = "failed to apply follow goal"; + return false; + } + + const auto capture = solver.captureDetailed(camera); + if (!capture.canUseGoal()) + { + if (error) + *error = "failed to capture camera state after follow apply"; + return false; + } + + out.hasCapturedGoal = true; + out.capturedGoal = capture.goal; + if (!core::CCameraGoalUtilities::compareGoals(out.capturedGoal, out.goal, thresholds.positionTolerance, thresholds.rotationToleranceDeg, 
thresholds.scalarTolerance)) + { + if (error) + *error = std::string("follow goal mismatch. ") + core::CCameraGoalUtilities::describeGoalMismatch(out.capturedGoal, out.goal); + return false; + } + + return validateFollowTargetContract( + camera, + trackedTarget, + followConfig, + out.goal, + out.regression, + error, + projectionContext, + thresholds); +} + +} // namespace nbl::system diff --git a/src/nbl/ext/Cameras/CCameraFollowUtilities.cpp b/src/nbl/ext/Cameras/CCameraFollowUtilities.cpp new file mode 100644 index 0000000000..354c87cf78 --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraFollowUtilities.cpp @@ -0,0 +1,211 @@ +// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraFollowUtilities.hpp" + +namespace nbl::core +{ + +CTrackedTarget::CTrackedTarget( + const hlsl::float64_t3& position, + const hlsl::camera_quaternion_t& orientation, + std::string identifier) + : m_identifier(std::move(identifier)), + m_gimbal(gimbal_t::base_t::SCreationParameters{ .position = position, .orientation = orientation }) +{ + m_gimbal.updateView(); +} + +void CTrackedTarget::setPose(const hlsl::float64_t3& position, const hlsl::camera_quaternion_t& orientation) +{ + m_gimbal.begin(); + m_gimbal.setPosition(position); + m_gimbal.setOrientation(orientation); + m_gimbal.end(); + m_gimbal.updateView(); +} + +void CTrackedTarget::setPosition(const hlsl::float64_t3& position) +{ + setPose(position, m_gimbal.getOrientation()); +} + +void CTrackedTarget::setOrientation(const hlsl::camera_quaternion_t& orientation) +{ + setPose(m_gimbal.getPosition(), orientation); +} + +bool CTrackedTarget::trySetFromTransform(const hlsl::float64_t4x4& transform) +{ + hlsl::float64_t3 position = hlsl::float64_t3(0.0); + hlsl::camera_quaternion_t orientation = hlsl::CCameraMathUtilities::makeIdentityQuaternion(); + if 
(!hlsl::CCameraMathUtilities::tryExtractRigidPoseFromTransform(transform, position, orientation)) + return false; + + setPose(position, orientation); + return true; +} + +hlsl::float64_t3 CCameraFollowUtilities::transformFollowLocalOffset(const ICamera::CGimbal& gimbal, const hlsl::float64_t3& localOffset) +{ + return hlsl::CCameraMathUtilities::rotateVectorByQuaternion(gimbal.getOrientation(), localOffset); +} + +hlsl::float64_t3 CCameraFollowUtilities::projectFollowWorldOffsetToLocal(const ICamera::CGimbal& gimbal, const hlsl::float64_t3& worldOffset) +{ + return hlsl::CCameraMathUtilities::projectWorldVectorToLocalQuaternionFrame(gimbal.getOrientation(), worldOffset); +} + +bool CCameraFollowUtilities::buildFollowLookAtOrientation( + const hlsl::float64_t3& position, + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& preferredUp, + hlsl::camera_quaternion_t& outOrientation) +{ + return hlsl::CCameraMathUtilities::tryBuildLookAtOrientation(position, targetPosition, preferredUp, outOrientation); +} + +bool CCameraFollowUtilities::captureFollowOffsetsFromCamera( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + SCameraFollowConfig& ioConfig) +{ + const auto capture = solver.captureDetailed(camera); + if (!capture.canUseGoal()) + return false; + + const auto& targetGimbal = trackedTarget.getGimbal(); + ioConfig.worldOffset = capture.goal.position - targetGimbal.getPosition(); + ioConfig.localOffset = projectFollowWorldOffsetToLocal(targetGimbal, ioConfig.worldOffset); + return true; +} + +bool CCameraFollowUtilities::tryComputeFollowTargetLockMetrics( + const ICamera::CGimbal& cameraGimbal, + const CTrackedTarget& trackedTarget, + float& outAngleDeg, + double* outDistance) +{ + const auto toTarget = trackedTarget.getGimbal().getPosition() - cameraGimbal.getPosition(); + const auto targetDistance = hlsl::length(toTarget); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(targetDistance) || targetDistance 
<= SCameraToolingThresholds::TinyScalarEpsilon) + return false; + + const auto forward = cameraGimbal.getZAxis(); + const auto forwardLength = hlsl::length(forward); + if (!hlsl::CCameraMathUtilities::isFiniteVec3(forward) || !hlsl::CCameraMathUtilities::isFiniteScalar(forwardLength) || forwardLength <= SCameraToolingThresholds::TinyScalarEpsilon) + return false; + + const auto forwardDirection = forward / forwardLength; + const auto targetDir = toTarget / targetDistance; + const auto dotForward = std::clamp(hlsl::dot(forwardDirection, targetDir), -1.0, 1.0); + outAngleDeg = static_cast(hlsl::degrees(hlsl::acos(dotForward))); + if (!hlsl::CCameraMathUtilities::isFiniteScalar(outAngleDeg)) + return false; + + if (outDistance) + *outDistance = targetDistance; + return true; +} + +bool CCameraFollowUtilities::tryBuildFollowPositionGoal( + ICamera* camera, + CCameraGoal& outGoal, + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const hlsl::float64_t3& preferredUp) +{ + if (camera->supportsGoalState(ICamera::GoalStateSphericalTarget)) + return CCameraGoalUtilities::buildCanonicalTargetRelativeGoalFromPosition(outGoal, targetPosition, position); + + outGoal.position = position; + return buildFollowLookAtOrientation(outGoal.position, targetPosition, preferredUp, outGoal.orientation) && CCameraGoalUtilities::isGoalFinite(outGoal); +} + +bool CCameraFollowUtilities::tryBuildFollowGoal( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + const SCameraFollowConfig& config, + CCameraGoal& outGoal) +{ + if (!camera || !config.enabled || config.mode == ECameraFollowMode::Disabled) + return false; + + const auto capture = solver.captureDetailed(camera); + if (!capture.canUseGoal()) + return false; + + outGoal = capture.goal; + + const auto& targetGimbal = trackedTarget.getGimbal(); + const auto targetPosition = targetGimbal.getPosition(); + + switch (config.mode) + { + case ECameraFollowMode::OrbitTarget: + 
{ + if (!camera->supportsGoalState(ICamera::GoalStateSphericalTarget)) + return false; + + if (outGoal.hasPathState) + { + return CCameraGoalUtilities::applyCanonicalPathGoalFields(outGoal, targetPosition, outGoal.pathState) && CCameraGoalUtilities::isGoalFinite(outGoal); + } + + const bool hasSphericalState = outGoal.hasOrbitState || outGoal.hasDistance; + if (!hasSphericalState) + return false; + + const auto orbitDistance = outGoal.hasOrbitState ? outGoal.orbitDistance : outGoal.distance; + return CCameraGoalUtilities::applyCanonicalTargetRelativeGoal( + outGoal, + { + .target = targetPosition, + .orbitUv = outGoal.orbitUv, + .distance = orbitDistance + }); + } + + case ECameraFollowMode::LookAtTarget: + { + return tryBuildFollowPositionGoal(camera, outGoal, targetPosition, capture.goal.position, targetGimbal.getYAxis()); + } + + case ECameraFollowMode::KeepWorldOffset: + { + const auto position = targetPosition + config.worldOffset; + return tryBuildFollowPositionGoal(camera, outGoal, targetPosition, position, targetGimbal.getYAxis()); + } + + case ECameraFollowMode::KeepLocalOffset: + { + const auto position = targetPosition + transformFollowLocalOffset(targetGimbal, config.localOffset); + return tryBuildFollowPositionGoal(camera, outGoal, targetPosition, position, targetGimbal.getYAxis()); + } + + default: + return false; + } +} + +CCameraGoalSolver::SApplyResult CCameraFollowUtilities::applyFollowToCamera( + const CCameraGoalSolver& solver, + ICamera* camera, + const CTrackedTarget& trackedTarget, + const SCameraFollowConfig& config, + CCameraGoal* outGoal) +{ + CCameraGoal goal = {}; + if (!tryBuildFollowGoal(solver, camera, trackedTarget, config, goal)) + return {}; + + if (outGoal) + *outGoal = goal; + + return solver.applyDetailed(camera, goal); +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/CCameraGoalSolver.cpp b/src/nbl/ext/Cameras/CCameraGoalSolver.cpp new file mode 100644 index 0000000000..8b358377f5 --- /dev/null +++ 
b/src/nbl/ext/Cameras/CCameraGoalSolver.cpp @@ -0,0 +1,548 @@ +// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraGoalSolver.hpp" + +#include + +namespace nbl::core +{ + +bool CCameraGoalSolver::buildEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const +{ + out.clear(); + if (!camera) + return false; + + const auto canonicalTarget = CCameraGoalUtilities::canonicalizeGoal(target); + + if (camera->hasCapability(ICamera::SphericalTarget)) + return buildSphericalEvents(camera, canonicalTarget, out); + + return buildFreeEvents(camera, canonicalTarget, out); +} + +bool CCameraGoalSolver::capture(ICamera* camera, CCameraGoal& out) const +{ + out = {}; + if (!camera) + return false; + + const ICamera::CGimbal& gimbal = camera->getGimbal(); + out.position = hlsl::float64_t3(gimbal.getPosition()); + out.orientation = gimbal.getOrientation(); + out.sourceKind = camera->getKind(); + out.sourceCapabilities = ICamera::capability_flags_t(camera->getCapabilities()); + out.sourceGoalStateMask = ICamera::goal_state_flags_t(camera->getGoalStateMask()); + + ICamera::SphericalTargetState sphericalState; + if (camera->tryGetSphericalTargetState(sphericalState)) + { + out.targetPosition = sphericalState.target; + out.hasTargetPosition = true; + out.distance = sphericalState.distance; + out.hasDistance = true; + out.orbitDistance = sphericalState.distance; + out.orbitUv = sphericalState.orbitUv; + out.hasOrbitState = true; + } + + ICamera::DynamicPerspectiveState dynamicState; + if (camera->tryGetDynamicPerspectiveState(dynamicState)) + { + out.hasDynamicPerspectiveState = true; + out.dynamicPerspectiveState = dynamicState; + } + + ICamera::PathState pathState; + if (camera->tryGetPathState(pathState)) + { + out.hasPathState = true; + out.pathState = pathState; + } + + out = 
CCameraGoalUtilities::canonicalizeGoal(out); + return true; +} + +CCameraGoalSolver::SCaptureResult CCameraGoalSolver::captureDetailed(ICamera* camera) const +{ + SCaptureResult result; + result.hasCamera = camera != nullptr; + if (!result.hasCamera) + return result; + + result.captured = capture(camera, result.goal); + result.finiteGoal = result.captured && CCameraGoalUtilities::isGoalFinite(result.goal); + return result; +} + +CCameraGoalSolver::SCompatibilityResult CCameraGoalSolver::analyzeCompatibility(const ICamera* camera, const CCameraGoal& target) const +{ + SCompatibilityResult result; + if (!camera) + return result; + + const auto canonicalTarget = CCameraGoalUtilities::canonicalizeGoal(target); + result.sameKind = canonicalTarget.sourceKind == ICamera::CameraKind::Unknown || canonicalTarget.sourceKind == camera->getKind(); + result.supportedGoalStateMask = ICamera::goal_state_flags_t(camera->getGoalStateMask()); + result.requiredGoalStateMask = CCameraGoalUtilities::getRequiredGoalStateMask(canonicalTarget); + result.missingGoalStateMask = result.requiredGoalStateMask & ~result.supportedGoalStateMask; + result.exact = result.missingGoalStateMask == ICamera::GoalStateNone; + return result; +} + +CCameraGoalSolver::SApplyResult CCameraGoalSolver::applyDetailed(ICamera* camera, const CCameraGoal& target) const +{ + SApplyResult result; + if (!camera) + return result; + + const auto canonicalTarget = CCameraGoalUtilities::canonicalizeGoal(target); + + bool exact = true; + bool absoluteChanged = false; + + if (!camera->hasCapability(ICamera::SphericalTarget)) + { + bool poseChanged = false; + bool poseExact = false; + if (tryApplyAbsoluteReferencePose(camera, canonicalTarget, poseChanged, poseExact)) + { + result.issues |= SApplyResult::EIssue::UsedAbsolutePoseFallback; + absoluteChanged = absoluteChanged || poseChanged; + if (poseExact && !canonicalTarget.hasDynamicPerspectiveState) + { + result.status = poseChanged ? 
+ SApplyResult::EStatus::AppliedAbsoluteOnly : + SApplyResult::EStatus::AlreadySatisfied; + result.exact = true; + return result; + } + } + } + + if (canonicalTarget.hasTargetPosition) + { + ICamera::SphericalTargetState beforeState; + if (!camera->tryGetSphericalTargetState(beforeState)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + const auto beforeTarget = beforeState.target; + if (!camera->trySetSphericalTarget(canonicalTarget.targetPosition)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + ICamera::SphericalTargetState afterState; + if (!camera->tryGetSphericalTargetState(afterState)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + absoluteChanged = afterState.target != beforeTarget; + exact = exact && afterState.target == canonicalTarget.targetPosition; + } + } + } + } + + if (canonicalTarget.hasDistance || canonicalTarget.hasOrbitState) + { + ICamera::SphericalTargetState beforeState; + if (!camera->tryGetSphericalTargetState(beforeState)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + const float desiredDistance = canonicalTarget.hasOrbitState ? 
canonicalTarget.orbitDistance : canonicalTarget.distance; + const float beforeDistance = beforeState.distance; + if (!camera->trySetSphericalDistance(desiredDistance)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + ICamera::SphericalTargetState afterState; + if (!camera->tryGetSphericalTargetState(afterState)) + { + result.issues |= SApplyResult::EIssue::MissingSphericalTargetState; + exact = false; + } + else + { + absoluteChanged = absoluteChanged || afterState.distance != beforeDistance; + exact = exact && hlsl::abs(static_cast(afterState.distance - desiredDistance)) <= SCameraToolingThresholds::ScalarTolerance; + } + } + } + } + + if (canonicalTarget.hasPathState) + { + ICamera::PathState beforeState; + if (!camera->tryGetPathState(beforeState)) + { + result.issues |= SApplyResult::EIssue::MissingPathState; + exact = false; + } + else if (!camera->trySetPathState(canonicalTarget.pathState)) + { + result.issues |= SApplyResult::EIssue::MissingPathState; + exact = false; + } + else + { + ICamera::PathState afterState; + if (!camera->tryGetPathState(afterState)) + { + result.issues |= SApplyResult::EIssue::MissingPathState; + exact = false; + } + else + { + const auto thresholds = SCameraPathDefaults::ComparisonThresholds; + const bool pathChanged = CCameraPathUtilities::pathStatesChanged(beforeState, afterState, thresholds); + const bool pathExact = CCameraPathUtilities::pathStatesNearlyEqual(afterState, canonicalTarget.pathState, thresholds); + + absoluteChanged = absoluteChanged || pathChanged; + exact = exact && pathExact; + } + } + } + + if (canonicalTarget.hasDynamicPerspectiveState) + { + ICamera::DynamicPerspectiveState beforeState; + if (!camera->tryGetDynamicPerspectiveState(beforeState)) + { + result.issues |= SApplyResult::EIssue::MissingDynamicPerspectiveState; + exact = false; + } + else if (!camera->trySetDynamicPerspectiveState(canonicalTarget.dynamicPerspectiveState)) + { + result.issues 
|= SApplyResult::EIssue::MissingDynamicPerspectiveState; + exact = false; + } + else + { + ICamera::DynamicPerspectiveState afterState; + if (!camera->tryGetDynamicPerspectiveState(afterState)) + { + result.issues |= SApplyResult::EIssue::MissingDynamicPerspectiveState; + exact = false; + } + else + { + const bool dynamicChanged = !hlsl::CCameraMathUtilities::nearlyEqualScalar(beforeState.baseFov, afterState.baseFov, static_cast(SCameraToolingThresholds::ScalarTolerance)) || + !hlsl::CCameraMathUtilities::nearlyEqualScalar(beforeState.referenceDistance, afterState.referenceDistance, static_cast(SCameraToolingThresholds::ScalarTolerance)); + const bool dynamicExact = hlsl::CCameraMathUtilities::nearlyEqualScalar(afterState.baseFov, canonicalTarget.dynamicPerspectiveState.baseFov, static_cast(SCameraToolingThresholds::ScalarTolerance)) && + hlsl::CCameraMathUtilities::nearlyEqualScalar(afterState.referenceDistance, canonicalTarget.dynamicPerspectiveState.referenceDistance, static_cast(SCameraToolingThresholds::ScalarTolerance)); + + absoluteChanged = absoluteChanged || dynamicChanged; + exact = exact && dynamicExact; + } + } + } + + std::vector events; + buildEvents(camera, canonicalTarget, events); + result.eventCount = static_cast(events.size()); + result.exact = exact; + + if (events.empty()) + { + if (absoluteChanged) + result.status = SApplyResult::EStatus::AppliedAbsoluteOnly; + else if (exact) + result.status = SApplyResult::EStatus::AlreadySatisfied; + return result; + } + + if (camera->manipulate({ events.data(), events.size() })) + { + result.status = absoluteChanged ? 
+ SApplyResult::EStatus::AppliedAbsoluteAndVirtualEvents : + SApplyResult::EStatus::AppliedVirtualEvents; + return result; + } + + if (absoluteChanged) + { + result.status = SApplyResult::EStatus::AppliedAbsoluteOnly; + result.exact = false; + return result; + } + + result.issues |= SApplyResult::EIssue::VirtualEventReplayFailed; + result.status = SApplyResult::EStatus::Failed; + result.exact = false; + return result; +} + +bool CCameraGoalSolver::apply(ICamera* camera, const CCameraGoal& target) const +{ + return applyDetailed(camera, target).succeeded(); +} + +void CCameraGoalSolver::appendYawPitchRollEvents( + std::vector& events, + const hlsl::float64_t3& eulerRadians, + double denominator, + bool includeRoll) const +{ + static constexpr std::array RotationBindings = {{ + { CVirtualGimbalEvent::TiltUp, CVirtualGimbalEvent::TiltDown }, + { CVirtualGimbalEvent::PanRight, CVirtualGimbalEvent::PanLeft }, + { CVirtualGimbalEvent::RollRight, CVirtualGimbalEvent::RollLeft } + }}; + + auto tolerances = SGoalSolverDefaults::AngularToleranceDegVec; + if (!includeRoll) + tolerances.z = std::numeric_limits::infinity(); + + CCameraVirtualEventUtilities::appendAngularAxisEvents( + events, + eulerRadians, + hlsl::float64_t3(denominator), + tolerances, + RotationBindings); +} + +void CCameraGoalSolver::appendPathDeltaEvents( + std::vector& events, + const SCameraPathDelta& delta, + double moveDenominator, + double rotationDenominator) const +{ + CCameraPathUtilities::appendPathDeltaEvents( + events, + delta, + moveDenominator, + rotationDenominator, + SCameraPathDefaults::ExactComparisonThresholds); +} + +double CCameraGoalSolver::getMoveMagnitudeDenominator(const ICamera* camera) const +{ + const double moveScale = camera->getMoveSpeedScale(); + return camera->getUnscaledVirtualTranslationMagnitude() * (moveScale == 0.0 ? 
SGoalSolverDefaults::UnitScale : moveScale); +} + +double CCameraGoalSolver::getRotationMagnitudeDenominator(const ICamera* camera) const +{ + const double rotationScale = camera->getRotationSpeedScale(); + return rotationScale == 0.0 ? SGoalSolverDefaults::UnitScale : rotationScale; +} + +bool CCameraGoalSolver::computePoseMismatch(ICamera* camera, const CCameraGoal& target, double& outPositionDelta, double& outRotationDeltaDeg) const +{ + outPositionDelta = 0.0; + outRotationDeltaDeg = 0.0; + if (!camera) + return false; + + const ICamera::CGimbal& gimbal = camera->getGimbal(); + hlsl::SCameraPoseDelta poseDelta = {}; + if (!hlsl::CCameraMathUtilities::tryComputePoseDelta(gimbal.getPosition(), gimbal.getOrientation(), target.position, target.orientation, poseDelta)) + return false; + + outPositionDelta = poseDelta.position; + outRotationDeltaDeg = poseDelta.rotationDeg; + return true; +} + +bool CCameraGoalSolver::tryApplyAbsoluteReferencePose(ICamera* camera, const CCameraGoal& target, bool& outChanged, bool& outExact) const +{ + outChanged = false; + outExact = false; + if (!camera) + return false; + + switch (camera->getKind()) + { + case ICamera::CameraKind::Free: + case ICamera::CameraKind::FPS: + break; + default: + return false; + } + + double beforePosDelta = 0.0; + double beforeRotDeltaDeg = 0.0; + if (!computePoseMismatch(camera, target, beforePosDelta, beforeRotDeltaDeg)) + return false; + + if (beforePosDelta <= SCameraToolingThresholds::DefaultPositionTolerance && beforeRotDeltaDeg <= SCameraToolingThresholds::DefaultAngularToleranceDeg) + { + outExact = true; + return true; + } + + const auto targetFrame = hlsl::CCameraMathUtilities::composeTransformMatrix(target.position, target.orientation); + + camera->manipulate({}, &targetFrame); + + double afterPosDelta = 0.0; + double afterRotDeltaDeg = 0.0; + if (!computePoseMismatch(camera, target, afterPosDelta, afterRotDeltaDeg)) + return false; + + outChanged = 
!hlsl::CCameraMathUtilities::isNearlyZeroScalar(afterPosDelta - beforePosDelta, static_cast(SCameraToolingThresholds::TinyScalarEpsilon)) || + !hlsl::CCameraMathUtilities::isNearlyZeroScalar(afterRotDeltaDeg - beforeRotDeltaDeg, static_cast(SCameraToolingThresholds::TinyScalarEpsilon)); + outExact = afterPosDelta <= SCameraToolingThresholds::DefaultPositionTolerance && afterRotDeltaDeg <= SCameraToolingThresholds::DefaultAngularToleranceDeg; + return true; +} + +bool CCameraGoalSolver::buildTargetRelativeEvents( + ICamera* camera, + const ICamera::SphericalTargetState& sphericalState, + const SCameraTargetRelativeState& goal, + std::vector& out, + const SCameraTargetRelativeEventPolicy& policy) const +{ + const auto delta = CCameraTargetRelativeUtilities::buildTargetRelativeDelta(sphericalState, goal); + CCameraTargetRelativeUtilities::appendTargetRelativeDeltaEvents( + out, + delta, + policy.translateOrbit ? getMoveMagnitudeDenominator(camera) : getRotationMagnitudeDenominator(camera), + SCameraToolingThresholds::DefaultAngularToleranceDeg, + camera->getUnscaledVirtualTranslationMagnitude(), + SCameraToolingThresholds::ScalarTolerance, + policy); + return !out.empty(); +} + +bool CCameraGoalSolver::buildPathEvents( + ICamera* camera, + const CCameraGoal& target, + const ICamera::SphericalTargetState& sphericalState, + std::vector& out) const +{ + if (!camera) + return false; + + const auto effectiveTarget = target.hasTargetPosition ? target.targetPosition : sphericalState.target; + ICamera::PathState currentState = {}; + const ICamera::PathState* currentStateOverride = camera->tryGetPathState(currentState) ? 
¤tState : nullptr; + ICamera::PathStateLimits pathLimits = CCameraPathUtilities::makeDefaultPathLimits(); + camera->tryGetPathStateLimits(pathLimits); + SCameraPathStateTransition transition = {}; + if (!CCameraPathUtilities::tryBuildPathStateTransition( + effectiveTarget, + camera->getGimbal().getPosition(), + target.position, + pathLimits, + currentStateOverride, + target.hasPathState ? &target.pathState : nullptr, + transition)) + { + return false; + } + + const auto moveDenom = getMoveMagnitudeDenominator(camera); + const auto rotationDenom = getRotationMagnitudeDenominator(camera); + appendPathDeltaEvents(out, transition.delta, moveDenom, rotationDenom); + return !out.empty(); +} + +bool CCameraGoalSolver::buildSphericalEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const +{ + ICamera::SphericalTargetState sphericalState; + if (!camera || !camera->tryGetSphericalTargetState(sphericalState)) + return false; + + if (camera->getKind() == ICamera::CameraKind::Path) + return buildPathEvents(camera, target, sphericalState, out); + + SCameraTargetRelativeState goal; + if (!CCameraGoalUtilities::tryResolveCanonicalTargetRelativeState(target, sphericalState, goal)) + return false; + + switch (camera->getKind()) + { + case ICamera::CameraKind::Orbit: + case ICamera::CameraKind::DollyZoom: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::OrbitTranslatePolicy); + + case ICamera::CameraKind::Turntable: + case ICamera::CameraKind::Arcball: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::RotateDistancePolicy); + + case ICamera::CameraKind::TopDown: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::TopDownPolicy); + + case ICamera::CameraKind::Isometric: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::IsometricPolicy); + + case 
ICamera::CameraKind::Dolly: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::DollyPolicy); + + case ICamera::CameraKind::Chase: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::ChasePolicy); + + default: + return buildTargetRelativeEvents(camera, sphericalState, goal, out, SCameraTargetRelativeRigDefaults::OrbitTranslatePolicy); + } +} + +bool CCameraGoalSolver::buildFreeEvents(ICamera* camera, const CCameraGoal& target, std::vector& out) const +{ + const ICamera::CGimbal& gimbal = camera->getGimbal(); + const hlsl::float64_t3 currentPos = gimbal.getPosition(); + const hlsl::float64_t3 deltaWorld = target.position - currentPos; + CCameraVirtualEventUtilities::appendWorldTranslationAsLocalEvents( + out, + gimbal.getOrientation(), + deltaWorld, + SGoalSolverDefaults::UnitAxisDenominator, + SGoalSolverDefaults::ScalarToleranceVec); + + switch (camera->getKind()) + { + case ICamera::CameraKind::FPS: + { + const hlsl::float64_t2 currentPitchYaw = hlsl::CCameraMathUtilities::getPitchYawFromOrientation(gimbal.getOrientation()); + const hlsl::float64_t2 targetPitchYaw = hlsl::CCameraMathUtilities::getPitchYawFromOrientation(target.orientation); + + const double rotScale = camera->getRotationSpeedScale(); + const double invScale = rotScale == 0.0 ? 
SGoalSolverDefaults::UnitScale : (SGoalSolverDefaults::UnitScale / rotScale); + + appendYawPitchRollEvents( + out, + hlsl::float64_t3( + hlsl::CCameraMathUtilities::wrapAngleRad(targetPitchYaw.x - currentPitchYaw.x) * invScale, + hlsl::CCameraMathUtilities::wrapAngleRad(targetPitchYaw.y - currentPitchYaw.y) * invScale, + 0.0), + SGoalSolverDefaults::UnitScale, + false); + } break; + + case ICamera::CameraKind::Free: + { + appendYawPitchRollEvents( + out, + hlsl::CCameraMathUtilities::getOrientationDeltaEulerRadiansYXZ(gimbal.getOrientation(), target.orientation), + SGoalSolverDefaults::UnitScale); + } break; + + default: + break; + } + + return !out.empty(); +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/CCameraInputBindingUtilities.cpp b/src/nbl/ext/Cameras/CCameraInputBindingUtilities.cpp new file mode 100644 index 0000000000..14c6f3cbab --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraInputBindingUtilities.cpp @@ -0,0 +1,479 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
// For conditions of distribution and use, see copyright notice in nabla.h

#include "nbl/ext/Cameras/CCameraInputBindingUtilities.hpp"

// NOTE(review): throughout this translation unit every template parameter
// list and every `<...>` argument (std::array extents, static_cast targets,
// included header names) was stripped by the transport encoding. The code is
// reproduced token-for-token; restore the angle-bracket arguments from VCS
// before compiling. Initializer counts imply: wasd/ijkl/relative groups hold
// 4 events, qe/scroll pairs hold 2.
#include

namespace nbl::ui
{

namespace
{

// Aliases for the physical-axis groupings a binding preset is specified over.
using virtual_event_t = core::CVirtualGimbalEvent::VirtualEventType;
using keyboard_axis_group_t = std::array;
using mouse_axis_group_t = std::array;
using scalar_axis_pair_t = std::array;

// Declarative description of a keyboard preset: which virtual event each
// physical group (WASD, Q/E, IJKL) maps to, and the magnitude scale per group.
struct SKeyboardPresetSpec final
{
    keyboard_axis_group_t wasd = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None
    };
    double wasdScale = IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
    scalar_axis_pair_t qe = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None
    };
    double qeScale = IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
    keyboard_axis_group_t ijkl = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None
    };
    double ijklScale = IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
};

// Declarative description of a mouse preset: relative-motion axes and
// scroll-wheel pair, each with its own magnitude scale.
struct SMousePresetSpec final
{
    mouse_axis_group_t relative = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None
    };
    double relativeScale = IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
    scalar_axis_pair_t scroll = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None
    };
    double scrollScale = IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
};

// Reusable virtual-event groupings shared by several presets below.
struct SCameraInputBindingEventGroups final
{
    static inline constexpr std::array FpsMove = {
        core::CVirtualGimbalEvent::MoveForward,
        core::CVirtualGimbalEvent::MoveBackward,
        core::CVirtualGimbalEvent::MoveLeft,
        core::CVirtualGimbalEvent::MoveRight
    };
    static inline constexpr std::array OrbitTranslate = {
        core::CVirtualGimbalEvent::MoveUp,
        core::CVirtualGimbalEvent::MoveDown,
        core::CVirtualGimbalEvent::MoveLeft,
        core::CVirtualGimbalEvent::MoveRight
    };
    static inline constexpr std::array OrbitZoom = {
        core::CVirtualGimbalEvent::MoveForward,
        core::CVirtualGimbalEvent::MoveBackward
    };
    static inline constexpr std::array VerticalMove = {
        core::CVirtualGimbalEvent::MoveDown,
        core::CVirtualGimbalEvent::MoveUp
    };
    static inline constexpr std::array PathRigProgressAndU = {
        core::CVirtualGimbalEvent::MoveForward,
        core::CVirtualGimbalEvent::MoveBackward,
        core::CVirtualGimbalEvent::MoveLeft,
        core::CVirtualGimbalEvent::MoveRight
    };
    static inline constexpr std::array PathRigV = VerticalMove;
    static inline constexpr std::array TurntableMove = {
        core::CVirtualGimbalEvent::MoveForward,
        core::CVirtualGimbalEvent::MoveBackward,
        core::CVirtualGimbalEvent::PanLeft,
        core::CVirtualGimbalEvent::PanRight
    };
    static inline constexpr std::array LookYawPitch = {
        core::CVirtualGimbalEvent::TiltDown,
        core::CVirtualGimbalEvent::TiltUp,
        core::CVirtualGimbalEvent::PanLeft,
        core::CVirtualGimbalEvent::PanRight
    };
    static inline constexpr std::array Roll = {
        core::CVirtualGimbalEvent::RollLeft,
        core::CVirtualGimbalEvent::RollRight
    };
    static inline constexpr std::array PanOnly = {
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::None,
        core::CVirtualGimbalEvent::PanLeft,
        core::CVirtualGimbalEvent::PanRight
    };
    static inline constexpr std::array RelativeLook = {
        core::CVirtualGimbalEvent::PanRight,
        core::CVirtualGimbalEvent::PanLeft,
        core::CVirtualGimbalEvent::TiltUp,
        core::CVirtualGimbalEvent::TiltDown
    };
    static inline constexpr std::array RelativeOrbitTranslate = {
        core::CVirtualGimbalEvent::MoveRight,
        core::CVirtualGimbalEvent::MoveLeft,
        core::CVirtualGimbalEvent::MoveUp,
        core::CVirtualGimbalEvent::MoveDown
    };
    static inline constexpr std::array RelativeTopDown = {
        core::CVirtualGimbalEvent::PanRight,
        core::CVirtualGimbalEvent::PanLeft,
        core::CVirtualGimbalEvent::MoveUp,
        core::CVirtualGimbalEvent::MoveDown
    };
};

// A full per-interaction-family binding description (keyboard + mouse).
struct SCameraInteractionBindingSpec
{
    SKeyboardPresetSpec keyboard = {};
    SMousePresetSpec mouse = {};
};

// The same description after expansion into the runtime hash-map form.
struct SCameraMappedInteractionBindingSpec
{
    IGimbalBindingLayout::keyboard_to_virtual_events_t keyboard;
    IGimbalBindingLayout::mouse_to_virtual_events_t mouse;
};

// True if `preset` binds any of the physical codes in `codes`.
template
bool containsBindingForAnyCode(const Map& preset, const Codes& codes)
{
    for (const auto code : codes)
    {
        if (preset.find(code) != preset.end())
            return true;
    }
    return false;
}

// Variadic OR over several code groups.
template
bool containsBindingForAnyCodeGroups(const Map& preset, const Codes&... codes)
{
    return (containsBindingForAnyCode(preset, codes) || ...);
}

constexpr size_t interactionFamilyIndex(const core::ECameraInteractionFamily family)
{
    return static_cast(family);
}

// Pairs codes[i] with events[i]; None entries are skipped so a spec can leave
// part of a physical group unbound.
template
void appendBindingSpec(Map& preset, const Codes& codes, const Events& events, const double magnitudeScale)
{
    for (size_t i = 0u; i < codes.size() && i < events.size(); ++i)
    {
        const auto event = events[i];
        if (event == core::CVirtualGimbalEvent::None)
            continue;
        preset.emplace(codes[i], IGimbalBindingLayout::CHashInfo(event, magnitudeScale));
    }
}

// Binds every code in `codes` to the same single event (e.g. a scroll group).
template
void appendMirroredBindingSpec(Map& preset, const Codes& codes, const virtual_event_t event, const double magnitudeScale)
{
    if (event == core::CVirtualGimbalEvent::None)
        return;

    std::array> duplicatedEvents = {};
    duplicatedEvents.fill(event);
    appendBindingSpec(preset, codes, duplicatedEvents, magnitudeScale);
}

// Expands a declarative keyboard spec into the runtime mapping.
IGimbalBindingLayout::keyboard_to_virtual_events_t buildKeyboardPreset(const SKeyboardPresetSpec& spec)
{
    IGimbalBindingLayout::keyboard_to_virtual_events_t preset;
    appendBindingSpec(preset, SCameraInputBindingPhysicalGroups::KeyboardWasdCodes, spec.wasd, spec.wasdScale);
    appendBindingSpec(preset, SCameraInputBindingPhysicalGroups::KeyboardQeCodes, spec.qe, spec.qeScale);
    appendBindingSpec(preset, SCameraInputBindingPhysicalGroups::KeyboardIjklCodes, spec.ijkl, spec.ijklScale);
    return preset;
}

// Expands a declarative mouse spec into the runtime mapping.
IGimbalBindingLayout::mouse_to_virtual_events_t buildMousePreset(const SMousePresetSpec& spec)
{
    IGimbalBindingLayout::mouse_to_virtual_events_t preset;
    appendBindingSpec(preset, SCameraInputBindingPhysicalGroups::RelativeMouseCodes, spec.relative, spec.relativeScale);
    appendMirroredBindingSpec(preset, SCameraInputBindingPhysicalGroups::PositiveScrollCodes, spec.scroll[0], spec.scrollScale);
    appendMirroredBindingSpec(preset, SCameraInputBindingPhysicalGroups::NegativeScrollCodes, spec.scroll[1], spec.scrollScale);
    return preset;
}

// Default ImGuizmo magnitude scale per event category.
double getDefaultImguizmoMagnitudeScale(const virtual_event_t event)
{
    if (core::CVirtualGimbalEvent::isTranslationEvent(event))
        return CCameraInputBindingUtilities::SInputMagnitudeDefaults::ImguizmoTranslationUnitsPerWorldUnit;
    if (core::CVirtualGimbalEvent::isRotationEvent(event))
        return CCameraInputBindingUtilities::SInputMagnitudeDefaults::ImguizmoRotationUnitsPerRadian;
    if (core::CVirtualGimbalEvent::isScaleEvent(event))
        return CCameraInputBindingUtilities::SInputMagnitudeDefaults::ImguizmoScaleUnitsPerFactor;
    return IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale;
}

// Identity-maps every allowed virtual event with the category-default scale.
IGimbalBindingLayout::imguizmo_to_virtual_events_t makeImguizmoPreset(const uint32_t allowedVirtualEvents)
{
    IGimbalBindingLayout::imguizmo_to_virtual_events_t preset;
    for (const auto event : core::CVirtualGimbalEvent::VirtualEventsTypeTable)
    {
        if (event == core::CVirtualGimbalEvent::None)
            continue;
        // Require ALL bits of the event flag to be allowed.
        if ((allowedVirtualEvents & event) != event)
            continue;
        preset.emplace(event, IGimbalBindingLayout::CHashInfo(event, getDefaultImguizmoMagnitudeScale(event)));
    }
    return preset;
}

// --- Per-family declarative presets. Later specs reuse earlier ones. ---

constexpr SCameraInteractionBindingSpec EmptyInteractionBindingSpec = {};

constexpr SKeyboardPresetSpec FpsKeyboardSpec = {
    SCameraInputBindingEventGroups::FpsMove,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    {},
    IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale,
    SCameraInputBindingEventGroups::LookYawPitch,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond
};

constexpr SKeyboardPresetSpec FreeKeyboardSpec = {
    FpsKeyboardSpec.wasd,
    FpsKeyboardSpec.wasdScale,
    SCameraInputBindingEventGroups::Roll,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    FpsKeyboardSpec.ijkl,
    FpsKeyboardSpec.ijklScale
};

constexpr SKeyboardPresetSpec OrbitKeyboardSpec = {
    SCameraInputBindingEventGroups::OrbitTranslate,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    SCameraInputBindingEventGroups::OrbitZoom,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    {},
    IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale
};

constexpr SKeyboardPresetSpec TargetRigKeyboardSpec = {
    FpsKeyboardSpec.wasd,
    FpsKeyboardSpec.wasdScale,
    SCameraInputBindingEventGroups::VerticalMove,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    FpsKeyboardSpec.ijkl,
    FpsKeyboardSpec.ijklScale
};

constexpr SKeyboardPresetSpec TurntableKeyboardSpec = {
    SCameraInputBindingEventGroups::TurntableMove,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    {},
    IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale,
    FpsKeyboardSpec.ijkl,
    FpsKeyboardSpec.ijklScale
};

constexpr SKeyboardPresetSpec TopDownKeyboardSpec = {
    OrbitKeyboardSpec.wasd,
    OrbitKeyboardSpec.wasdScale,
    OrbitKeyboardSpec.qe,
    OrbitKeyboardSpec.qeScale,
    SCameraInputBindingEventGroups::PanOnly,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond
};

constexpr SKeyboardPresetSpec PathKeyboardSpec = {
    SCameraInputBindingEventGroups::PathRigProgressAndU,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    SCameraInputBindingEventGroups::PathRigV,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::KeyboardHeldUnitsPerSecond,
    {},
    IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale
};

constexpr SMousePresetSpec FpsMouseSpec = {
    SCameraInputBindingEventGroups::RelativeLook,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::RelativeMouseUnitsPerStep,
    {},
    IGimbalBindingLayout::CHashInfo::DefaultMagnitudeScale
};

constexpr SMousePresetSpec OrbitMouseSpec = {
    SCameraInputBindingEventGroups::RelativeOrbitTranslate,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::RelativeMouseUnitsPerStep,
    SCameraInputBindingEventGroups::OrbitZoom,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::ScrollUnitsPerStep
};

constexpr SMousePresetSpec TargetRigMouseSpec = {
    FpsMouseSpec.relative,
    FpsMouseSpec.relativeScale,
    OrbitMouseSpec.scroll,
    OrbitMouseSpec.scrollScale
};

constexpr SMousePresetSpec TopDownMouseSpec = {
    SCameraInputBindingEventGroups::RelativeTopDown,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::RelativeMouseUnitsPerStep,
    OrbitMouseSpec.scroll,
    OrbitMouseSpec.scrollScale
};

constexpr SMousePresetSpec PathMouseSpec = {
    SCameraInputBindingEventGroups::RelativeOrbitTranslate,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::RelativeMouseUnitsPerStep,
    SCameraInputBindingEventGroups::OrbitZoom,
    CCameraInputBindingUtilities::SInputMagnitudeDefaults::ScrollUnitsPerStep
};

constexpr SCameraInteractionBindingSpec FpsInteractionBindingSpec = {
    FpsKeyboardSpec,
    FpsMouseSpec
};

constexpr SCameraInteractionBindingSpec FreeInteractionBindingSpec = {
    FreeKeyboardSpec,
    FpsMouseSpec
};

constexpr SCameraInteractionBindingSpec OrbitInteractionBindingSpec = {
    OrbitKeyboardSpec,
    OrbitMouseSpec
};

constexpr SCameraInteractionBindingSpec TargetRigInteractionBindingSpec = {
    TargetRigKeyboardSpec,
    TargetRigMouseSpec
};

constexpr SCameraInteractionBindingSpec TurntableInteractionBindingSpec = {
    TurntableKeyboardSpec,
    TargetRigMouseSpec
};

constexpr SCameraInteractionBindingSpec TopDownInteractionBindingSpec = {
    TopDownKeyboardSpec,
    TopDownMouseSpec
};

constexpr SCameraInteractionBindingSpec PathInteractionBindingSpec = {
    PathKeyboardSpec,
    PathMouseSpec
};

// Maps each declarative spec through `builder` into a parallel array.
template
auto makePresetCache(const SpecArray& specs, Builder&& builder)
{
    std::array> cache = {};
    for (size_t i = 0u; i < specs.size(); ++i)
        cache[i] = builder(specs[i]);
    return cache;
}

SCameraMappedInteractionBindingSpec mapInteractionBindingSpec(const SCameraInteractionBindingSpec& spec)
{
    return {
        .keyboard = buildKeyboardPreset(spec.keyboard),
        .mouse = buildMousePreset(spec.mouse)
    };
}

// Index order must match core::ECameraInteractionFamily enumerator order;
// slot 0 is the empty fallback used for out-of-range families.
constexpr std::array InteractionFamilyPresetSpecs = {{
    EmptyInteractionBindingSpec,
    FpsInteractionBindingSpec,
    FreeInteractionBindingSpec,
    OrbitInteractionBindingSpec,
    TargetRigInteractionBindingSpec,
    TurntableInteractionBindingSpec,
    TopDownInteractionBindingSpec,
    PathInteractionBindingSpec
}};

// Lazily expands all family specs once (function-local static) and returns the
// cached mapping for the camera kind's interaction family.
const SCameraMappedInteractionBindingSpec& interactionBindingPresetForKind(const core::ICamera::CameraKind kind)
{
    const auto familyIx = interactionFamilyIndex(core::CCameraKindUtilities::getCameraInteractionFamily(kind));
    static const auto cache = makePresetCache(
        InteractionFamilyPresetSpecs,
        [](const SCameraInteractionBindingSpec& spec) { return mapInteractionBindingSpec(spec); });
    // Out-of-range family index falls back to the empty preset at slot 0.
    return cache[familyIx < cache.size() ? familyIx : 0u];
}

} // namespace

bool CCameraInputBindingUtilities::hasMouseRelativeMovementBinding(const IGimbalBindingLayout::mouse_to_virtual_events_t& mousePreset)
{
    return containsBindingForAnyCodeGroups(mousePreset, SCameraInputBindingPhysicalGroups::RelativeMouseCodes);
}

bool CCameraInputBindingUtilities::hasMouseScrollBinding(const IGimbalBindingLayout::mouse_to_virtual_events_t& mousePreset)
{
    return containsBindingForAnyCodeGroups(
        mousePreset,
        SCameraInputBindingPhysicalGroups::PositiveScrollCodes,
        SCameraInputBindingPhysicalGroups::NegativeScrollCodes);
}

const IGimbalBindingLayout::keyboard_to_virtual_events_t& CCameraInputBindingUtilities::getDefaultCameraKeyboardMappingPreset(const core::ICamera::CameraKind kind)
{
    return interactionBindingPresetForKind(kind).keyboard;
}

const IGimbalBindingLayout::keyboard_to_virtual_events_t& CCameraInputBindingUtilities::getDefaultCameraKeyboardMappingPreset(const core::ICamera& camera)
{
    return getDefaultCameraKeyboardMappingPreset(camera.getKind());
}

const IGimbalBindingLayout::mouse_to_virtual_events_t& CCameraInputBindingUtilities::getDefaultCameraMouseMappingPreset(const core::ICamera::CameraKind kind)
{
    return interactionBindingPresetForKind(kind).mouse;
}

const IGimbalBindingLayout::mouse_to_virtual_events_t& CCameraInputBindingUtilities::getDefaultCameraMouseMappingPreset(const core::ICamera& camera)
{
    return getDefaultCameraMouseMappingPreset(camera.getKind());
}

IGimbalBindingLayout::imguizmo_to_virtual_events_t CCameraInputBindingUtilities::buildDefaultCameraImguizmoMappingPreset(const uint32_t allowedVirtualEvents)
{
    return makeImguizmoPreset(allowedVirtualEvents);
}

IGimbalBindingLayout::imguizmo_to_virtual_events_t CCameraInputBindingUtilities::buildDefaultCameraImguizmoMappingPreset(const core::ICamera& camera)
{
    return buildDefaultCameraImguizmoMappingPreset(camera.getAllowedVirtualEvents());
}

// Aggregates keyboard + mouse + ImGuizmo presets for a camera kind.
SCameraInputBindingPreset CCameraInputBindingUtilities::buildDefaultCameraInputBindingPreset(
    const core::ICamera::CameraKind kind,
    const uint32_t allowedVirtualEvents)
{
    SCameraInputBindingPreset preset;
    preset.keyboard = getDefaultCameraKeyboardMappingPreset(kind);
    preset.mouse = getDefaultCameraMouseMappingPreset(kind);
    preset.imguizmo = buildDefaultCameraImguizmoMappingPreset(allowedVirtualEvents);
    return preset;
}

SCameraInputBindingPreset CCameraInputBindingUtilities::buildDefaultCameraInputBindingPreset(const core::ICamera& camera)
{
    return buildDefaultCameraInputBindingPreset(camera.getKind(), camera.getAllowedVirtualEvents());
}

// Overwrites all three of the layout's mappings with the kind defaults.
void CCameraInputBindingUtilities::applyDefaultCameraInputBindingPreset(
    IGimbalBindingLayout& layout,
    const core::ICamera::CameraKind kind,
    const uint32_t allowedVirtualEvents)
{
    const auto preset = buildDefaultCameraInputBindingPreset(kind, allowedVirtualEvents);
    layout.updateKeyboardMapping([&](auto& map) { map = preset.keyboard; });
    layout.updateMouseMapping([&](auto& map) { map = preset.mouse; });
    layout.updateImguizmoMapping([&](auto& map) { map = preset.imguizmo; });
}

void CCameraInputBindingUtilities::applyDefaultCameraInputBindingPreset(IGimbalBindingLayout& layout, const core::ICamera& camera)
{
    applyDefaultCameraInputBindingPreset(layout, camera.getKind(), camera.getAllowedVirtualEvents());
}

} // namespace nbl::ui
#ifndef _NBL_EXT_CAMERAS_JSON_PERSISTENCE_UTILITIES_HPP_INCLUDED_
#define _NBL_EXT_CAMERAS_JSON_PERSISTENCE_UTILITIES_HPP_INCLUDED_

#include

#include "nbl/ext/Cameras/CCameraFileUtilities.hpp"
#include "nbl/ext/Cameras/CCameraGoal.hpp"
#include "nbl/ext/Cameras/CCameraPresetFlow.hpp"
#include
"nlohmann/json.hpp" + +namespace nbl::system::impl +{ + +struct CCameraJsonPersistenceUtilities final +{ + template + static inline void deserializeGoalJson(const Json& entry, core::CCameraGoal& goal) + { + goal = {}; + + if (entry.contains("camera_kind")) + goal.sourceKind = static_cast(entry["camera_kind"].get()); + if (entry.contains("camera_capabilities")) + goal.sourceCapabilities = core::ICamera::capability_flags_t(entry["camera_capabilities"].get()); + if (entry.contains("camera_goal_state_mask")) + goal.sourceGoalStateMask = core::ICamera::goal_state_flags_t(entry["camera_goal_state_mask"].get()); + + if (entry.contains("position") && entry["position"].is_array()) + { + const auto values = entry["position"].get>(); + goal.position = hlsl::float64_t3(values[0], values[1], values[2]); + } + if (entry.contains("orientation") && entry["orientation"].is_array()) + { + const auto values = entry["orientation"].get>(); + goal.orientation = hlsl::CCameraMathUtilities::makeQuaternionFromComponents(values[0], values[1], values[2], values[3]); + } + if (entry.contains("target_position") && entry["target_position"].is_array()) + { + const auto values = entry["target_position"].get>(); + goal.targetPosition = hlsl::float64_t3(values[0], values[1], values[2]); + goal.hasTargetPosition = true; + } + if (entry.contains("distance")) + { + goal.distance = entry["distance"].get(); + goal.hasDistance = true; + } + if (entry.contains("orbit_u")) + { + goal.orbitUv.x = entry["orbit_u"].get(); + goal.hasOrbitState = true; + } + if (entry.contains("orbit_v")) + { + goal.orbitUv.y = entry["orbit_v"].get(); + goal.hasOrbitState = true; + } + if (entry.contains("orbit_distance")) + { + goal.orbitDistance = entry["orbit_distance"].get(); + goal.hasOrbitState = true; + } + if (entry.contains("path_s") && entry.contains("path_u") && entry.contains("path_v")) + { + goal.pathState.s = entry["path_s"].get(); + goal.pathState.u = entry["path_u"].get(); + goal.pathState.v = 
entry["path_v"].get(); + goal.pathState.roll = entry.contains("path_roll") ? entry["path_roll"].get() : 0.0; + goal.hasPathState = true; + } + if (entry.contains("dynamic_base_fov")) + { + goal.dynamicPerspectiveState.baseFov = entry["dynamic_base_fov"].get(); + goal.hasDynamicPerspectiveState = true; + } + if (entry.contains("dynamic_reference_distance")) + { + goal.dynamicPerspectiveState.referenceDistance = entry["dynamic_reference_distance"].get(); + goal.hasDynamicPerspectiveState = true; + } + } + + template + static inline void deserializePresetJson(const Json& entry, core::CCameraPreset& preset) + { + preset = {}; + if (entry.contains("name")) + preset.name = entry["name"].get(); + if (entry.contains("identifier")) + preset.identifier = entry["identifier"].get(); + + core::CCameraGoal goal = {}; + deserializeGoalJson(entry, goal); + core::CCameraPresetUtilities::assignGoalToPreset(preset, goal); + } +}; + +} // namespace nbl::system::impl + +#endif // _NBL_EXT_CAMERAS_JSON_PERSISTENCE_UTILITIES_HPP_INCLUDED_ diff --git a/src/nbl/ext/Cameras/CCameraKeyframeTrack.cpp b/src/nbl/ext/Cameras/CCameraKeyframeTrack.cpp new file mode 100644 index 0000000000..f7aefd12cd --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraKeyframeTrack.cpp @@ -0,0 +1,152 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraKeyframeTrack.hpp" + +namespace nbl::core +{ + +bool CCameraKeyframeTrackUtilities::compareKeyframes(const CCameraKeyframe& lhs, const CCameraKeyframe& rhs, + const double timeEps, const double posEps, const double rotEpsDeg, const double scalarEps) +{ + return hlsl::abs(static_cast(lhs.time - rhs.time)) <= timeEps && + CCameraPresetUtilities::comparePresets(lhs.preset, rhs.preset, posEps, rotEpsDeg, scalarEps); +} + +bool CCameraKeyframeTrackUtilities::compareKeyframeTracks(const CCameraKeyframeTrack& lhs, const CCameraKeyframeTrack& rhs, + const double timeEps, const double posEps, const double rotEpsDeg, const double scalarEps, const bool compareSelection) +{ + if ((compareSelection && lhs.selectedKeyframeIx != rhs.selectedKeyframeIx) || lhs.keyframes.size() != rhs.keyframes.size()) + return false; + + for (size_t i = 0u; i < lhs.keyframes.size(); ++i) + { + if (!compareKeyframes(lhs.keyframes[i], rhs.keyframes[i], timeEps, posEps, rotEpsDeg, scalarEps)) + return false; + } + + return true; +} + +bool CCameraKeyframeTrackUtilities::compareKeyframeTrackContent(const CCameraKeyframeTrack& lhs, const CCameraKeyframeTrack& rhs, + const double timeEps, const double posEps, const double rotEpsDeg, const double scalarEps) +{ + return compareKeyframeTracks(lhs, rhs, timeEps, posEps, rotEpsDeg, scalarEps, false); +} + +bool CCameraKeyframeTrackUtilities::tryBuildKeyframeTrackPresetAtTime(const CCameraKeyframeTrack& track, const float time, CCameraPreset& preset) +{ + if (track.keyframes.empty()) + return false; + + if (track.keyframes.size() == 1u) + { + preset = track.keyframes.front().preset; + return true; + } + + const auto clampedTime = std::clamp(time, 0.f, track.keyframes.back().time); + size_t idx = 0u; + while (idx + 1u < track.keyframes.size() && track.keyframes[idx + 1u].time < clampedTime) + ++idx; + + const auto& a = track.keyframes[idx]; + const 
auto& b = track.keyframes[std::min(idx + 1u, track.keyframes.size() - 1u)]; + if (b.time <= a.time) + { + preset = a.preset; + return true; + } + + const double alpha = static_cast(clampedTime - a.time) / static_cast(b.time - a.time); + preset = a.preset; + CCameraPresetUtilities::assignGoalToPreset( + preset, + CCameraGoalUtilities::blendGoals( + CCameraPresetUtilities::makeGoalFromPreset(a.preset), + CCameraPresetUtilities::makeGoalFromPreset(b.preset), + alpha)); + return true; +} + +void CCameraKeyframeTrackUtilities::sortKeyframeTrackByTime(CCameraKeyframeTrack& track) +{ + std::sort(track.keyframes.begin(), track.keyframes.end(), [](const auto& a, const auto& b) { return a.time < b.time; }); +} + +void CCameraKeyframeTrackUtilities::clampTrackTimeToKeyframes(const CCameraKeyframeTrack& track, float& time) +{ + if (track.keyframes.empty()) + { + time = 0.f; + return; + } + + time = std::clamp(time, 0.f, track.keyframes.back().time); +} + +int CCameraKeyframeTrackUtilities::selectKeyframeTrackNearestTime(CCameraKeyframeTrack& track, const float time) +{ + if (track.keyframes.empty()) + { + track.selectedKeyframeIx = -1; + return track.selectedKeyframeIx; + } + + size_t bestIx = 0u; + float bestDelta = hlsl::abs(track.keyframes.front().time - time); + for (size_t i = 1u; i < track.keyframes.size(); ++i) + { + const float delta = hlsl::abs(track.keyframes[i].time - time); + if (delta < bestDelta) + { + bestDelta = delta; + bestIx = i; + } + } + + track.selectedKeyframeIx = static_cast(bestIx); + return track.selectedKeyframeIx; +} + +void CCameraKeyframeTrackUtilities::normalizeSelectedKeyframeTrack(CCameraKeyframeTrack& track) +{ + if (track.keyframes.empty()) + { + track.selectedKeyframeIx = -1; + return; + } + + if (track.selectedKeyframeIx < 0) + track.selectedKeyframeIx = 0; + else if (track.selectedKeyframeIx >= static_cast(track.keyframes.size())) + track.selectedKeyframeIx = static_cast(track.keyframes.size()) - 1; +} + +CCameraKeyframe* 
CCameraKeyframeTrackUtilities::getSelectedKeyframe(CCameraKeyframeTrack& track) +{ + normalizeSelectedKeyframeTrack(track); + if (track.selectedKeyframeIx < 0) + return nullptr; + return &track.keyframes[static_cast(track.selectedKeyframeIx)]; +} + +const CCameraKeyframe* CCameraKeyframeTrackUtilities::getSelectedKeyframe(const CCameraKeyframeTrack& track) +{ + if (track.selectedKeyframeIx < 0 || track.selectedKeyframeIx >= static_cast(track.keyframes.size())) + return nullptr; + return &track.keyframes[static_cast(track.selectedKeyframeIx)]; +} + +bool CCameraKeyframeTrackUtilities::replaceSelectedKeyframePreset(CCameraKeyframeTrack& track, CCameraPreset preset) +{ + auto* selected = getSelectedKeyframe(track); + if (!selected) + return false; + + selected->preset = std::move(preset); + return true; +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/CCameraMathUtilities.cpp b/src/nbl/ext/Cameras/CCameraMathUtilities.cpp new file mode 100644 index 0000000000..4c88fb81c4 --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraMathUtilities.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
// For conditions of distribution and use, see copyright notice in nabla.h

#include "nbl/ext/Cameras/CCameraMathUtilities.hpp"

namespace nbl::hlsl
{

namespace
{

// NOTE(review): the template parameter list of this helper, the template
// arguments of camera_quaternion_t/camera_vector_t/camera_matrix_t, and the
// target of _static_cast were stripped by the transport encoding — the two
// Impl overloads below appear identical only because their differing
// arguments are missing. Restore from VCS before compiling.
//
// Builds a quaternion from a (right, up, forward) basis by casting the basis
// matrix to a quaternion; falls back to identity when the cast produces
// non-finite components, otherwise returns the normalized result.
template
camera_quaternion_t makeQuaternionFromBasisWithCast(
    const camera_vector_t& right,
    const camera_vector_t& up,
    const camera_vector_t& forward)
{
    const camera_matrix_t basis(right, up, forward);
    const auto candidate = _static_cast>(basis);
    if (!CCameraMathUtilities::isFiniteQuaternion(candidate))
        return CCameraMathUtilities::makeIdentityQuaternion();

    return CCameraMathUtilities::normalizeQuaternion(candidate);
}

} // namespace

// Precision-specific entry point (presumably float32 — argument lost in
// transport); forwards to the shared helper above.
camera_quaternion_t CCameraMathUtilities::makeQuaternionFromBasisImpl(
    const camera_vector_t& right,
    const camera_vector_t& up,
    const camera_vector_t& forward)
{
    return makeQuaternionFromBasisWithCast(right, up, forward);
}

// Precision-specific entry point (presumably float64 — argument lost in
// transport); forwards to the shared helper above.
camera_quaternion_t CCameraMathUtilities::makeQuaternionFromBasisImpl(
    const camera_vector_t& right,
    const camera_vector_t& up,
    const camera_vector_t& forward)
{
    return makeQuaternionFromBasisWithCast(right, up, forward);
}

} // namespace nbl::hlsl
// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O.
// This file is part of the "Nabla Engine".
// For conditions of distribution and use, see copyright notice in nabla.h

#include "nbl/ext/Cameras/CCameraPathUtilities.hpp"

// NOTE(review): static_cast / numeric_limits template arguments were stripped
// by the transport encoding throughout this file; restore from VCS before
// compiling. Context suggests limits.minDistance/maxDistance are float and
// minU/state fields are double — TODO confirm.

namespace nbl::core
{

// Canonical "start of path" state: zero progress/offsets at the minimum u.
ICamera::PathState CCameraPathUtilities::makeDefaultPathState(const double minU)
{
    return {
        .s = 0.0,
        .u = minU,
        .v = 0.0,
        .roll = 0.0
    };
}

// Both angular tolerances (s and roll) share one degree value.
SCameraPathComparisonThresholds CCameraPathUtilities::makePathComparisonThresholds(
    const double angularToleranceDeg,
    const double scalarTolerance)
{
    return {
        .sToleranceDeg = angularToleranceDeg,
        .rollToleranceDeg = angularToleranceDeg,
        .scalarTolerance = scalarTolerance
    };
}

// True when every component of the state is a finite number.
bool CCameraPathUtilities::isPathStateFinite(const ICamera::PathState& state)
{
    return hlsl::CCameraMathUtilities::isFiniteScalar(state.s) &&
        hlsl::CCameraMathUtilities::isFiniteScalar(state.u) &&
        hlsl::CCameraMathUtilities::isFiniteScalar(state.v) &&
        hlsl::CCameraMathUtilities::isFiniteScalar(state.roll);
}

// maxDistance may legitimately be +infinity (unbounded), so it is only
// checked against NaN — unlike minU/minDistance which must be finite.
bool CCameraPathUtilities::isPathLimitsWellFormed(const SCameraPathLimits& limits)
{
    return hlsl::CCameraMathUtilities::isFiniteScalar(limits.minU) &&
        hlsl::CCameraMathUtilities::isFiniteScalar(limits.minDistance) &&
        !std::isnan(static_cast(limits.maxDistance));
}

// Normalizes limits in place: clamps minU to >= 0, raises minDistance to the
// largest of its own value, minU and the global rig minimum, and forces
// maxDistance to either +infinity or a value >= minDistance.
// Returns false (leaving limits untouched) when the input is malformed.
bool CCameraPathUtilities::sanitizePathLimits(SCameraPathLimits& limits)
{
    if (!isPathLimitsWellFormed(limits))
        return false;

    limits.minU = std::max(limits.minU, 0.0);
    limits.minDistance = std::max(
        std::max(limits.minDistance, static_cast(limits.minU)),
        static_cast(SCameraTargetRelativeTraits::MinDistance));

    if (!std::isfinite(static_cast(limits.maxDistance)))
        limits.maxDistance = std::numeric_limits::infinity();
    else
        limits.maxDistance = std::max(limits.maxDistance, limits.minDistance);
    return true;
}

// Component-wise sanitization against a bare minimum-u (delegated to the math layer).
bool CCameraPathUtilities::sanitizePathState(ICamera::PathState& state, const double minU)
{
    return hlsl::CCameraMathUtilities::sanitizePathState(state.s, state.u, state.v, state.roll, minU);
}

// Full sanitization: limits first, then the state, then re-scales the (u,v)
// offsets so the implied distance lands inside [minDistance, maxDistance].
bool CCameraPathUtilities::sanitizePathState(ICamera::PathState& state, const SCameraPathLimits& limits, double* outAppliedDistance)
{
    SCameraPathLimits sanitizedLimits = limits;
    if (!sanitizePathLimits(sanitizedLimits))
        return false;

    if (!sanitizePathState(state, sanitizedLimits.minU))
        return false;

    const auto desiredDistance = std::clamp(
        hlsl::CCameraMathUtilities::getPathDistance(state.u, state.v),
        sanitizedLimits.minDistance,
        sanitizedLimits.maxDistance);
    return tryScalePathStateDistance(desiredDistance, sanitizedLimits.minU, state, outAppliedDistance);
}

// Thin wrapper over the math-layer distance re-scaling of (u, v).
bool CCameraPathUtilities::tryScalePathStateDistance(
    const double desiredDistance,
    const double minU,
    ICamera::PathState& ioState,
    double* outAppliedDistance)
{
    return hlsl::CCameraMathUtilities::tryScalePathStateDistance(
        desiredDistance,
        minU,
        ioState.u,
        ioState.v,
        outAppliedDistance);
}

// Clamps the requested distance into the sanitized limits, applies it, and
// optionally reports the achieved distance plus whether the request was met
// exactly (no clamping AND the scaler hit the target within tolerance).
bool CCameraPathUtilities::tryUpdatePathStateDistance(
    const float desiredDistance,
    const SCameraPathLimits& limits,
    ICamera::PathState& ioState,
    SCameraPathDistanceUpdateResult* outResult)
{
    SCameraPathLimits sanitizedLimits = limits;
    if (!sanitizePathLimits(sanitizedLimits) || !sanitizePathState(ioState, sanitizedLimits))
        return false;

    const auto clampedDistance = std::clamp(desiredDistance, sanitizedLimits.minDistance, sanitizedLimits.maxDistance);
    double appliedDistance = 0.0;
    if (!tryScalePathStateDistance(static_cast(clampedDistance), sanitizedLimits.minU, ioState, &appliedDistance))
        return false;

    if (outResult)
    {
        outResult->appliedDistance = appliedDistance;
        outResult->exact = (clampedDistance == desiredDistance) &&
            hlsl::CCameraMathUtilities::nearlyEqualScalar(appliedDistance, static_cast(desiredDistance), SCameraPathDefaults::ScalarTolerance);
    }
    return true;
}

// Derives (s, u, v) from a world position relative to the path target; roll is
// not derivable from a position and is reset to zero.
bool CCameraPathUtilities::tryBuildPathStateFromPosition(
    const hlsl::float64_t3& targetPosition,
    const hlsl::float64_t3& position,
    const double minU,
    ICamera::PathState& outState)
{
    outState = {};
    if (!hlsl::CCameraMathUtilities::tryBuildPathStateFromPosition(
        targetPosition,
        position,
        minU,
        outState.s,
        outState.u,
        outState.v))
    {
        return false;
    }

    outState.roll = 0.0;
    return true;
}

// Resolution order: an explicitly requested state wins; otherwise derive one
// from the camera position; otherwise fall back to the default state. Every
// branch is sanitized against the limits before being returned.
bool CCameraPathUtilities::tryResolvePathState(
    const hlsl::float64_t3& targetPosition,
    const hlsl::float64_t3& position,
    const SCameraPathLimits& limits,
    const ICamera::PathState* requestedState,
    ICamera::PathState& outState)
{
    SCameraPathLimits sanitizedLimits = limits;
    if (!sanitizePathLimits(sanitizedLimits))
        return false;

    if (requestedState)
    {
        outState = *requestedState;
        return sanitizePathState(outState, sanitizedLimits);
    }

    if (tryBuildPathStateFromPosition(targetPosition, position, sanitizedLimits.minU, outState))
        return sanitizePathState(outState, sanitizedLimits);

    outState = makeDefaultPathState(sanitizedLimits.minU);
    return sanitizePathState(outState, sanitizedLimits);
}

// Expands a path state into a world pose (position + orientation) around the
// target, also reporting the applied distance and orbit (u,v).
bool CCameraPathUtilities::tryBuildPathPoseFromState(
    const hlsl::float64_t3& targetPosition,
    const ICamera::PathState& state,
    const SCameraPathLimits& limits,
    SCameraPathPose& outPose)
{
    SCameraPathLimits sanitizedLimits = limits;
    if (!sanitizePathLimits(sanitizedLimits))
        return false;

    return hlsl::CCameraMathUtilities::tryBuildPathPoseFromState(
        targetPosition,
        state.s,
        state.u,
        state.v,
        state.roll,
        sanitizedLimits.minU,
        sanitizedLimits.minDistance,
        sanitizedLimits.maxDistance,
        outPose.position,
        outPose.orientation,
        &outPose.appliedDistance,
        &outPose.orbitUv);
}

// Scalar-output convenience overload; body continues past this excerpt.
bool CCameraPathUtilities::tryBuildPathPoseFromState(
    const hlsl::float64_t3& targetPosition,
    const ICamera::PathState& state,
    const SCameraPathLimits& limits,
    hlsl::float64_t3& outPosition,
    hlsl::camera_quaternion_t& outOrientation,
    hlsl::float64_t* outAppliedDistance,
    hlsl::float64_t2* outOrbitUv)
{
    SCameraPathPose pathPose = {};
    if
(!tryBuildPathPoseFromState(targetPosition, state, limits, pathPose)) + return false; + + outPosition = pathPose.position; + outOrientation = pathPose.orientation; + if (outAppliedDistance) + *outAppliedDistance = pathPose.appliedDistance; + if (outOrbitUv) + *outOrbitUv = pathPose.orbitUv; + return true; +} + +bool CCameraPathUtilities::pathStatesNearlyEqual( + const ICamera::PathState& lhs, + const ICamera::PathState& rhs, + const SCameraPathComparisonThresholds& thresholds) +{ + return hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(lhs.s, rhs.s) <= thresholds.sToleranceDeg && + hlsl::CCameraMathUtilities::nearlyEqualScalar(lhs.u, rhs.u, thresholds.scalarTolerance) && + hlsl::CCameraMathUtilities::nearlyEqualScalar(lhs.v, rhs.v, thresholds.scalarTolerance) && + hlsl::CCameraMathUtilities::getWrappedAngleDistanceDegrees(lhs.roll, rhs.roll) <= thresholds.rollToleranceDeg; +} + +bool CCameraPathUtilities::pathStatesChanged( + const ICamera::PathState& lhs, + const ICamera::PathState& rhs, + const SCameraPathComparisonThresholds& thresholds) +{ + return !pathStatesNearlyEqual(lhs, rhs, thresholds); +} + +hlsl::float64_t4 CCameraPathUtilities::buildPathStateDeltaVector( + const ICamera::PathState& currentState, + const ICamera::PathState& desiredState) +{ + auto deltaVector = desiredState.asVector() - currentState.asVector(); + deltaVector.x = hlsl::CCameraMathUtilities::wrapAngleRad(deltaVector.x); + deltaVector.w = hlsl::CCameraMathUtilities::wrapAngleRad(deltaVector.w); + return deltaVector; +} + +SCameraPathDelta CCameraPathUtilities::buildPathStateDelta( + const ICamera::PathState& currentState, + const ICamera::PathState& desiredState) +{ + return SCameraPathDelta::fromVector(buildPathStateDeltaVector(currentState, desiredState)); +} + +SCameraPathDelta CCameraPathUtilities::makePathDeltaFromVirtualPathMotion( + const hlsl::float64_t3& translation, + const hlsl::float64_t3& rotation) +{ + return SCameraPathDelta::fromMotion(translation, rotation.z); 
+} + +SCameraPathDelta CCameraPathUtilities::buildDefaultPathControlDelta(const SCameraPathControlContext& context) +{ + return makePathDeltaFromVirtualPathMotion(context.translation, context.rotation); +} + +void CCameraPathUtilities::appendPathDeltaEvents( + std::vector& events, + const SCameraPathDelta& delta, + const double moveDenominator, + const double rotationDenominator, + const SCameraPathComparisonThresholds& thresholds) +{ + CCameraVirtualEventUtilities::appendLocalTranslationEvents( + events, + delta.translationVector(), + hlsl::float64_t3(moveDenominator), + hlsl::float64_t3(thresholds.scalarTolerance)); + CCameraVirtualEventUtilities::appendAngularDeltaEvent( + events, + delta.roll, + rotationDenominator, + thresholds.rollToleranceDeg, + CVirtualGimbalEvent::RollRight, + CVirtualGimbalEvent::RollLeft); +} + +bool CCameraPathUtilities::tryBuildCanonicalPathState( + const hlsl::float64_t3& targetPosition, + const ICamera::PathState& state, + const SCameraPathLimits& limits, + SCameraCanonicalPathState& outState) +{ + outState = {}; + if (!tryBuildPathPoseFromState(targetPosition, state, limits, outState.pose)) + return false; + + outState.targetRelative = { + .target = targetPosition, + .orbitUv = outState.pose.orbitUv, + .distance = static_cast(outState.pose.appliedDistance) + }; + return true; +} + +bool CCameraPathUtilities::tryApplyPathStateDelta( + const ICamera::PathState& currentState, + const SCameraPathDelta& delta, + const SCameraPathLimits& limits, + ICamera::PathState& outState) +{ + auto stateVector = currentState.asVector() + delta.asVector(); + stateVector.x = hlsl::CCameraMathUtilities::wrapAngleRad(stateVector.x); + stateVector.w = hlsl::CCameraMathUtilities::wrapAngleRad(stateVector.w); + outState = ICamera::PathState::fromVector(stateVector); + return sanitizePathState(outState, limits); +} + +ICamera::PathState CCameraPathUtilities::blendPathStates( + const ICamera::PathState& from, + const ICamera::PathState& to, + const double 
alpha) +{ + const auto fromVector = from.asVector(); + const auto toVector = to.asVector(); + return { + .s = hlsl::CCameraMathUtilities::lerpWrappedAngleRad(fromVector.x, toVector.x, alpha), + .u = fromVector.y + (toVector.y - fromVector.y) * alpha, + .v = fromVector.z + (toVector.z - fromVector.z) * alpha, + .roll = hlsl::CCameraMathUtilities::lerpWrappedAngleRad(fromVector.w, toVector.w, alpha) + }; +} + +bool CCameraPathUtilities::tryBuildPathStateTransition( + const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& currentPosition, + const hlsl::float64_t3& desiredPosition, + const SCameraPathLimits& limits, + const ICamera::PathState* currentStateOverride, + const ICamera::PathState* desiredStateOverride, + SCameraPathStateTransition& outTransition) +{ + if (!tryResolvePathState(targetPosition, currentPosition, limits, currentStateOverride, outTransition.current)) + return false; + if (!tryResolvePathState(targetPosition, desiredPosition, limits, desiredStateOverride, outTransition.desired)) + return false; + + outTransition.delta = buildPathStateDelta(outTransition.current, outTransition.desired); + return true; +} + +SCameraPathModel CCameraPathUtilities::makeDefaultPathModel() +{ + return { + .resolveState = + [](const hlsl::float64_t3& targetPosition, + const hlsl::float64_t3& position, + const SCameraPathLimits& limits, + const ICamera::PathState* requestedState, + ICamera::PathState& outState) -> bool + { + return tryResolvePathState(targetPosition, position, limits, requestedState, outState); + }, + .controlLaw = + [](const SCameraPathControlContext& context) -> SCameraPathDelta + { + return buildDefaultPathControlDelta(context); + }, + .integrate = + [](const ICamera::PathState& currentState, + const SCameraPathDelta& delta, + const SCameraPathLimits& limits, + ICamera::PathState& outState) -> bool + { + return tryApplyPathStateDelta(currentState, delta, limits, outState); + }, + .evaluate = + [](const hlsl::float64_t3& targetPosition, + 
const ICamera::PathState& state, + const SCameraPathLimits& limits, + SCameraCanonicalPathState& outState) -> bool + { + return tryBuildCanonicalPathState(targetPosition, state, limits, outState); + }, + .updateDistance = + [](const float desiredDistance, + const SCameraPathLimits& limits, + ICamera::PathState& ioState, + SCameraPathDistanceUpdateResult* outResult) -> bool + { + return tryUpdatePathStateDistance(desiredDistance, limits, ioState, outResult); + } + }; +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/CCameraPersistence.cpp b/src/nbl/ext/Cameras/CCameraPersistence.cpp new file mode 100644 index 0000000000..9d3a2ea5b2 --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraPersistence.cpp @@ -0,0 +1,298 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraPersistence.hpp" + +#include + +#include "CCameraJsonPersistenceUtilities.hpp" +#include "nbl/ext/Cameras/CCameraSequenceScriptPersistence.hpp" +#include "nlohmann/json.hpp" + +using json_t = nlohmann::json; + +namespace nbl::system +{ + +namespace impl +{ + +struct CCameraPersistenceJsonUtilities final +{ + static json_t serializeGoalJson(const nbl::core::CCameraGoal& goal) + { + json_t json; + json["position"] = { goal.position.x, goal.position.y, goal.position.z }; + json["orientation"] = { + goal.orientation.data.x, + goal.orientation.data.y, + goal.orientation.data.z, + goal.orientation.data.w + }; + json["camera_kind"] = static_cast(goal.sourceKind); + json["camera_capabilities"] = static_cast(goal.sourceCapabilities.value); + json["camera_goal_state_mask"] = static_cast(goal.sourceGoalStateMask.value); + + if (goal.hasTargetPosition) + json["target_position"] = { goal.targetPosition.x, goal.targetPosition.y, goal.targetPosition.z }; + if (goal.hasDistance) + json["distance"] = goal.distance; + if (goal.hasOrbitState) 
+ { + json["orbit_u"] = goal.orbitUv.x; + json["orbit_v"] = goal.orbitUv.y; + json["orbit_distance"] = goal.orbitDistance; + } + if (goal.hasPathState) + { + json["path_s"] = goal.pathState.s; + json["path_u"] = goal.pathState.u; + json["path_v"] = goal.pathState.v; + json["path_roll"] = goal.pathState.roll; + } + if (goal.hasDynamicPerspectiveState) + { + json["dynamic_base_fov"] = goal.dynamicPerspectiveState.baseFov; + json["dynamic_reference_distance"] = goal.dynamicPerspectiveState.referenceDistance; + } + + return json; + } + + static json_t serializePresetJson(const nbl::core::CCameraPreset& preset) + { + auto json = serializeGoalJson(nbl::core::CCameraPresetUtilities::makeGoalFromPreset(preset)); + json["name"] = preset.name; + json["identifier"] = preset.identifier; + return json; + } + + static json_t serializeKeyframeTrackJson(const nbl::core::CCameraKeyframeTrack& track) + { + json_t root; + root["keyframes"] = json_t::array(); + + for (const auto& keyframe : track.keyframes) + { + auto json = serializePresetJson(keyframe.preset); + json["time"] = keyframe.time; + root["keyframes"].push_back(std::move(json)); + } + + return root; + } + + static bool deserializeKeyframeTrackJson(const json_t& root, nbl::core::CCameraKeyframeTrack& track) + { + if (!root.contains("keyframes") || !root["keyframes"].is_array()) + return false; + + track = {}; + for (const auto& entry : root["keyframes"]) + { + nbl::core::CCameraKeyframe keyframe; + if (entry.contains("time")) + keyframe.time = std::max(0.f, entry["time"].get()); + CCameraJsonPersistenceUtilities::deserializePresetJson(entry, keyframe.preset); + track.keyframes.emplace_back(std::move(keyframe)); + } + + nbl::core::CCameraKeyframeTrackUtilities::sortKeyframeTrackByTime(track); + nbl::core::CCameraKeyframeTrackUtilities::normalizeSelectedKeyframeTrack(track); + return true; + } + + static json_t serializePresetCollectionJson(std::span presets) + { + json_t root; + root["presets"] = json_t::array(); + for 
(const auto& preset : presets) + root["presets"].push_back(serializePresetJson(preset)); + return root; + } + + static bool deserializePresetCollectionJson(const json_t& root, std::vector& presets) + { + if (!root.contains("presets") || !root["presets"].is_array()) + return false; + + std::vector loadedPresets; + loadedPresets.reserve(root["presets"].size()); + for (const auto& entry : root["presets"]) + { + nbl::core::CCameraPreset preset; + CCameraJsonPersistenceUtilities::deserializePresetJson(entry, preset); + loadedPresets.emplace_back(std::move(preset)); + } + + presets = std::move(loadedPresets); + return true; + } + + template + static bool deserializeJsonText( + std::string_view text, + Value& out, + const char* invalidPayloadMessage, + DeserializeFn&& deserializeFn, + std::string* error) + { + try + { + const auto root = json_t::parse(text); + if (!deserializeFn(root, out)) + { + if (error) + *error = invalidPayloadMessage; + return false; + } + return true; + } + catch (const json_t::exception& e) + { + if (error) + *error = e.what(); + return false; + } + } + + static bool readTextFileOrSetError(nbl::system::ISystem& system, const nbl::system::path& filePath, std::string& text, std::string* error, const char* openMessage) + { + return nbl::system::CCameraFileUtilities::readTextFile(system, filePath, text, error, openMessage); + } +}; + +} // namespace impl + +std::string CCameraPresetPersistenceUtilities::serializeGoal(const core::CCameraGoal& goal, const int indent) +{ + return impl::CCameraPersistenceJsonUtilities::serializeGoalJson(goal).dump(indent); +} + +bool CCameraPresetPersistenceUtilities::deserializeGoal(std::string_view text, core::CCameraGoal& goal, std::string* error) +{ + return impl::CCameraPersistenceJsonUtilities::deserializeJsonText( + text, + goal, + "Camera goal JSON payload is invalid.", + [](const json_t& root, core::CCameraGoal& outGoal) + { + impl::CCameraJsonPersistenceUtilities::deserializeGoalJson(root, outGoal); + return 
true; + }, + error); +} + +bool CCameraPresetPersistenceUtilities::saveGoalToFile(ISystem& system, const path& filePath, const core::CCameraGoal& goal, const int indent) +{ + return CCameraFileUtilities::writeTextFile(system, filePath, serializeGoal(goal, indent)); +} + +bool CCameraPresetPersistenceUtilities::loadGoalFromFile(ISystem& system, const path& filePath, core::CCameraGoal& goal, std::string* error) +{ + std::string text; + if (!impl::CCameraPersistenceJsonUtilities::readTextFileOrSetError(system, filePath, text, error, "Cannot open camera goal file.")) + return false; + + return deserializeGoal(text, goal, error); +} + +std::string CCameraPresetPersistenceUtilities::serializePreset(const core::CCameraPreset& preset, const int indent) +{ + return impl::CCameraPersistenceJsonUtilities::serializePresetJson(preset).dump(indent); +} + +bool CCameraPresetPersistenceUtilities::deserializePreset(std::string_view text, core::CCameraPreset& preset, std::string* error) +{ + return impl::CCameraPersistenceJsonUtilities::deserializeJsonText( + text, + preset, + "Camera preset JSON payload is invalid.", + [](const json_t& root, core::CCameraPreset& outPreset) + { + impl::CCameraJsonPersistenceUtilities::deserializePresetJson(root, outPreset); + return true; + }, + error); +} + +bool CCameraPresetPersistenceUtilities::savePresetToFile(ISystem& system, const path& filePath, const core::CCameraPreset& preset, const int indent) +{ + return CCameraFileUtilities::writeTextFile(system, filePath, serializePreset(preset, indent)); +} + +bool CCameraPresetPersistenceUtilities::loadPresetFromFile(ISystem& system, const path& filePath, core::CCameraPreset& preset, std::string* error) +{ + std::string text; + if (!impl::CCameraPersistenceJsonUtilities::readTextFileOrSetError(system, filePath, text, error, "Cannot open camera preset file.")) + return false; + + return deserializePreset(text, preset, error); +} + +std::string 
CCameraKeyframeTrackPersistenceUtilities::serializeKeyframeTrack(const core::CCameraKeyframeTrack& track, const int indent) +{ + return impl::CCameraPersistenceJsonUtilities::serializeKeyframeTrackJson(track).dump(indent); +} + +bool CCameraKeyframeTrackPersistenceUtilities::deserializeKeyframeTrack(std::string_view text, core::CCameraKeyframeTrack& track, std::string* error) +{ + return impl::CCameraPersistenceJsonUtilities::deserializeJsonText( + text, + track, + "Camera keyframe track JSON payload is invalid.", + [](const json_t& root, core::CCameraKeyframeTrack& outTrack) + { + return impl::CCameraPersistenceJsonUtilities::deserializeKeyframeTrackJson(root, outTrack); + }, + error); +} + +bool CCameraKeyframeTrackPersistenceUtilities::saveKeyframeTrackToFile(ISystem& system, const path& filePath, const core::CCameraKeyframeTrack& track, const int indent) +{ + return CCameraFileUtilities::writeTextFile(system, filePath, serializeKeyframeTrack(track, indent)); +} + +bool CCameraKeyframeTrackPersistenceUtilities::loadKeyframeTrackFromFile(ISystem& system, const path& filePath, core::CCameraKeyframeTrack& track, std::string* error) +{ + std::string text; + if (!impl::CCameraPersistenceJsonUtilities::readTextFileOrSetError(system, filePath, text, error, "Cannot open camera keyframe track file.")) + return false; + + return deserializeKeyframeTrack(text, track, error); +} + +std::string CCameraPersistenceUtilities::serializePresetCollection(std::span presets, const int indent) +{ + return impl::CCameraPersistenceJsonUtilities::serializePresetCollectionJson(presets).dump(indent); +} + +bool CCameraPersistenceUtilities::deserializePresetCollection(std::string_view text, std::vector& presets, std::string* error) +{ + return impl::CCameraPersistenceJsonUtilities::deserializeJsonText( + text, + presets, + "Camera preset collection JSON payload is invalid.", + [](const json_t& root, std::vector& outPresets) + { + return 
impl::CCameraPersistenceJsonUtilities::deserializePresetCollectionJson(root, outPresets); + }, + error); +} + +bool CCameraPersistenceUtilities::savePresetCollectionToFile(ISystem& system, const path& filePath, std::span presets, const int indent) +{ + return CCameraFileUtilities::writeTextFile(system, filePath, serializePresetCollection(presets, indent)); +} + +bool CCameraPersistenceUtilities::loadPresetCollectionFromFile(ISystem& system, const path& filePath, std::vector& presets, std::string* error) +{ + std::string text; + if (!impl::CCameraPersistenceJsonUtilities::readTextFileOrSetError(system, filePath, text, error, "Cannot open camera preset collection file.")) + return false; + + return deserializePresetCollection(text, presets, error); +} + +} // namespace nbl::system diff --git a/src/nbl/ext/Cameras/CCameraScriptedCheckRunner.cpp b/src/nbl/ext/Cameras/CCameraScriptedCheckRunner.cpp new file mode 100644 index 0000000000..a0e977a08a --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraScriptedCheckRunner.cpp @@ -0,0 +1,492 @@ +// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraScriptedCheckRunner.hpp" + +namespace nbl::system +{ + +void CCameraScriptedCheckRunnerUtilities::scriptedCheckSetStepReference( + CCameraScriptedCheckRuntimeState& state, + const hlsl::float64_t3& position, + const hlsl::camera_quaternion_t& orientation) +{ + state.step.valid = true; + state.step.position = position; + state.step.orientation = hlsl::CCameraMathUtilities::normalizeQuaternion(orientation); +} + +void CCameraScriptedCheckRunnerUtilities::scriptedCheckSetBaselineReference( + CCameraScriptedCheckRuntimeState& state, + const hlsl::float64_t3& position, + const hlsl::camera_quaternion_t& orientation) +{ + state.baseline.valid = true; + state.baseline.position = position; + state.baseline.orientation = hlsl::CCameraMathUtilities::normalizeQuaternion(orientation); + scriptedCheckSetStepReference(state, position, orientation); +} + +bool CCameraScriptedCheckRunnerUtilities::scriptedCheckComputePoseDelta( + const hlsl::float64_t3& currentPosition, + const hlsl::camera_quaternion_t& currentOrientation, + const hlsl::float64_t3& referencePosition, + const hlsl::camera_quaternion_t& referenceOrientation, + hlsl::SCameraPoseDelta& outDelta) +{ + return hlsl::CCameraMathUtilities::tryComputePoseDelta( + currentPosition, + currentOrientation, + referencePosition, + referenceOrientation, + outDelta); +} + +void CCameraScriptedCheckRunnerUtilities::appendScriptedCheckLog( + CCameraScriptedCheckFrameResult& result, + const bool failure, + std::string&& text) +{ + result.logs.push_back({ + .failure = failure, + .text = std::move(text) + }); + result.hadFailures = result.hadFailures || failure; +} + +CCameraScriptedCheckFrameResult CCameraScriptedCheckRunnerUtilities::evaluateScriptedChecksForFrame( + const std::vector& checks, + CCameraScriptedCheckRuntimeState& state, + const CCameraScriptedCheckContext& context) +{ + CCameraScriptedCheckFrameResult result 
= {}; + + while (state.nextCheckIndex < checks.size() && checks[state.nextCheckIndex].frame == context.frame) + { + const auto& check = checks[state.nextCheckIndex]; + + if (!context.camera) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] check frame=" << context.frame << " no active camera"; + })); + ++state.nextCheckIndex; + continue; + } + + const auto& gimbal = context.camera->getGimbal(); + const auto pos = gimbal.getPosition(); + const auto orientation = hlsl::CCameraMathUtilities::normalizeQuaternion(gimbal.getOrientation()); + const auto eulerDeg = hlsl::CCameraMathUtilities::castVector(hlsl::CCameraMathUtilities::getCameraOrientationEulerDegrees(orientation)); + + if (!hlsl::CCameraMathUtilities::isFiniteVec3(pos) || !hlsl::CCameraMathUtilities::isFiniteQuaternion(orientation) || !hlsl::CCameraMathUtilities::isFiniteVec3(eulerDeg)) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] check frame=" << context.frame << " non-finite gimbal state"; + })); + ++state.nextCheckIndex; + continue; + } + + switch (check.kind) + { + case CCameraScriptedInputCheck::Kind::Baseline: + { + scriptedCheckSetBaselineReference(state, pos, orientation); + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(3); + oss << "[script][pass] baseline frame=" << context.frame + << " pos=(" << pos.x << ", " << pos.y << ", " << pos.z << ")" + << " euler_deg=(" << eulerDeg.x << ", " << eulerDeg.y << ", " << eulerDeg.z << ")"; + })); + break; + } + case CCameraScriptedInputCheck::Kind::ImguizmoVirtual: + { + bool ok = true; + if (!context.imguizmoVirtual || context.imguizmoVirtualCount == 0u) + { + ok = false; + } + else + { + for (const auto& expected : check.expectedVirtualEvents) + { + bool found = false; + double actual = 0.0; + 
for (uint32_t i = 0u; i < context.imguizmoVirtualCount; ++i) + { + if (context.imguizmoVirtual[i].type == expected.type) + { + found = true; + actual = context.imguizmoVirtual[i].magnitude; + break; + } + } + + if (!found || hlsl::abs(actual - expected.magnitude) > check.tolerance) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] imguizmo_virtual frame=" << context.frame + << " type=" << core::CVirtualGimbalEvent::virtualEventToString(expected.type).data() + << " expected=" << expected.magnitude + << " actual=" << actual + << " tol=" << check.tolerance; + })); + } + } + } + + if (ok) + { + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][pass] imguizmo_virtual frame=" << context.frame + << " events=" << check.expectedVirtualEvents.size(); + })); + } + break; + } + case CCameraScriptedInputCheck::Kind::GimbalNear: + { + bool ok = true; + if (check.hasExpectedPos) + { + const double distance = hlsl::length(pos - hlsl::CCameraMathUtilities::castVector(check.expectedPos)); + if (distance > check.posTolerance) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] gimbal_near frame=" << context.frame + << " pos_diff=" << distance + << " tol=" << check.posTolerance; + })); + } + } + if (check.hasExpectedEuler) + { + const auto expectedOrientation = hlsl::CCameraMathUtilities::makeQuaternionFromEulerDegreesYXZ( + hlsl::CCameraMathUtilities::castVector(check.expectedEulerDeg)); + hlsl::SCameraPoseDelta poseDelta = {}; + if (!scriptedCheckComputePoseDelta(pos, orientation, pos, expectedOrientation, poseDelta)) + poseDelta.rotationDeg = std::numeric_limits::infinity(); + const auto rotationDeltaDeg = poseDelta.rotationDeg; + 
if (rotationDeltaDeg > check.eulerToleranceDeg) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] gimbal_near frame=" << context.frame + << " rot_delta_deg=" << rotationDeltaDeg + << " tol=" << check.eulerToleranceDeg; + })); + } + } + + if (ok) + { + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][pass] gimbal_near frame=" << context.frame; + })); + } + break; + } + case CCameraScriptedInputCheck::Kind::GimbalDelta: + { + if (!state.baseline.valid) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] gimbal_delta frame=" << context.frame << " missing baseline"; + })); + break; + } + + hlsl::SCameraPoseDelta poseDelta = {}; + if (!scriptedCheckComputePoseDelta(pos, orientation, state.baseline.position, state.baseline.orientation, poseDelta)) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] gimbal_delta frame=" << context.frame << " non-finite pose delta"; + })); + break; + } + + if (poseDelta.position > check.posTolerance || poseDelta.rotationDeg > check.eulerToleranceDeg) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] gimbal_delta frame=" << context.frame + << " pos_diff=" << poseDelta.position + << " tol=" << check.posTolerance + << " rot_delta_deg=" << poseDelta.rotationDeg + << " tol=" << check.eulerToleranceDeg; + })); + } + else + { + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][pass] gimbal_delta frame=" << context.frame + << " 
pos_diff=" << poseDelta.position + << " rot_delta_deg=" << poseDelta.rotationDeg; + })); + } + break; + } + case CCameraScriptedInputCheck::Kind::GimbalStep: + { + if (!state.step.valid) + { + if (state.baseline.valid) + { + scriptedCheckSetStepReference(state, state.baseline.position, state.baseline.orientation); + } + else + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] gimbal_step frame=" << context.frame << " missing step reference"; + })); + scriptedCheckSetStepReference(state, pos, orientation); + ++state.nextCheckIndex; + continue; + } + } + + hlsl::SCameraPoseDelta poseDelta = {}; + if (!scriptedCheckComputePoseDelta(pos, orientation, state.step.position, state.step.orientation, poseDelta)) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] gimbal_step frame=" << context.frame << " non-finite pose delta"; + })); + scriptedCheckSetStepReference(state, pos, orientation); + break; + } + + bool ok = true; + bool requiresProgress = false; + bool hasProgress = false; + if (check.hasPosDeltaConstraint) + { + if (poseDelta.position > check.posTolerance) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] gimbal_step frame=" << context.frame + << " pos_delta=" << poseDelta.position + << " max=" << check.posTolerance; + })); + } + if (check.minPosDelta > 0.0f) + { + requiresProgress = true; + hasProgress = hasProgress || poseDelta.position >= check.minPosDelta; + } + } + if (check.hasEulerDeltaConstraint) + { + if (poseDelta.rotationDeg > check.eulerToleranceDeg) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] 
gimbal_step frame=" << context.frame + << " rot_delta_deg=" << poseDelta.rotationDeg + << " max=" << check.eulerToleranceDeg; + })); + } + if (check.minEulerDeltaDeg > 0.0f) + { + requiresProgress = true; + hasProgress = hasProgress || poseDelta.rotationDeg >= check.minEulerDeltaDeg; + } + } + if (requiresProgress && !hasProgress) + { + ok = false; + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][fail] gimbal_step frame=" << context.frame + << " missing progress pos_delta=" << poseDelta.position + << " rot_delta_deg=" << poseDelta.rotationDeg; + })); + } + + if (ok) + { + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][pass] gimbal_step frame=" << context.frame + << " pos_delta=" << poseDelta.position + << " rot_delta_deg=" << poseDelta.rotationDeg; + })); + } + scriptedCheckSetStepReference(state, pos, orientation); + break; + } + case CCameraScriptedInputCheck::Kind::FollowTargetLock: + { + if (!context.followConfig) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] follow_lock frame=" << context.frame << " missing follow config"; + })); + break; + } + if (!context.trackedTarget) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] follow_lock frame=" << context.frame << " missing tracked target"; + })); + break; + } + if (!context.goalSolver) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] follow_lock frame=" << context.frame << " missing goal solver"; + })); + break; + } + + SCameraFollowRegressionResult regression = {}; + std::string regressionError; + core::CCameraGoal 
expectedFollowGoal = {}; + const auto thresholds = CCameraFollowRegressionUtilities::makeFollowRegressionThresholds(check.posTolerance, check.eulerToleranceDeg); + const bool ok = core::CCameraFollowUtilities::tryBuildFollowGoal( + *context.goalSolver, + context.camera, + *context.trackedTarget, + *context.followConfig, + expectedFollowGoal) && + CCameraFollowRegressionUtilities::validateFollowTargetContract( + context.camera, + *context.trackedTarget, + *context.followConfig, + expectedFollowGoal, + regression, + ®ressionError, + context.followProjectionContext, + thresholds); + + if (!ok) + { + appendScriptedCheckLog( + result, + true, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << "[script][fail] follow_lock frame=" << context.frame << ' ' + << (regressionError.empty() ? "follow validation mismatch" : regressionError); + })); + } + else + { + appendScriptedCheckLog( + result, + false, + buildScriptedCheckMessage([&](std::ostringstream& oss) + { + oss << std::fixed << std::setprecision(6); + oss << "[script][pass] follow_lock frame=" << context.frame + << " angle_deg=" << regression.lockAngleDeg + << " target_distance=" << regression.targetDistance + << " screen_ndc=" << regression.projectedTarget.radius; + })); + } + break; + } + } + + ++state.nextCheckIndex; + } + + return result; +} + +} // namespace nbl::system diff --git a/src/nbl/ext/Cameras/CCameraSequenceScript.cpp b/src/nbl/ext/Cameras/CCameraSequenceScript.cpp new file mode 100644 index 0000000000..157613fe9f --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraSequenceScript.cpp @@ -0,0 +1,509 @@ +// Copyright (C) 2018-2025 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraSequenceScript.hpp" + +namespace nbl::core +{ + +bool CCameraSequenceScriptUtilities::tryParseCameraKind(std::string_view value, ICamera::CameraKind& outKind) +{ + if (value == "FPS") + outKind = ICamera::CameraKind::FPS; + else if (value == "Free") + outKind = ICamera::CameraKind::Free; + else if (value == "Orbit") + outKind = ICamera::CameraKind::Orbit; + else if (value == "Arcball") + outKind = ICamera::CameraKind::Arcball; + else if (value == "Turntable") + outKind = ICamera::CameraKind::Turntable; + else if (value == "TopDown") + outKind = ICamera::CameraKind::TopDown; + else if (value == "Isometric") + outKind = ICamera::CameraKind::Isometric; + else if (value == "Chase") + outKind = ICamera::CameraKind::Chase; + else if (value == "Dolly") + outKind = ICamera::CameraKind::Dolly; + else if (value == "DollyZoom" || value == "Dolly Zoom") + outKind = ICamera::CameraKind::DollyZoom; + else if (value == "PathRig" || value == "Path Rig") + outKind = ICamera::CameraKind::Path; + else + return false; + + return true; +} + +bool CCameraSequenceScriptUtilities::tryParseProjectionType(std::string_view value, IPlanarProjection::CProjection::ProjectionType& outType) +{ + if (value == "perspective" || value == "Perspective") + outType = IPlanarProjection::CProjection::Perspective; + else if (value == "orthographic" || value == "Orthographic") + outType = IPlanarProjection::CProjection::Orthographic; + else + return false; + + return true; +} + +void CCameraSequenceScriptUtilities::normalizeCaptureFractions(std::vector& fractions) +{ + for (auto& fraction : fractions) + fraction = std::clamp(fraction, 0.f, 1.f); + + std::sort(fractions.begin(), fractions.end()); + fractions.erase( + std::unique( + fractions.begin(), + fractions.end(), + [](const float lhs, const float rhs) + { + return hlsl::CCameraMathUtilities::nearlyEqualScalar(lhs, rhs, 
static_cast(SCameraToolingThresholds::ScalarTolerance)); + }), + fractions.end()); +} + +bool CCameraSequenceScriptUtilities::buildSequenceKeyframePreset(const CCameraPreset& reference, const CCameraSequenceKeyframe& authored, CCameraPreset& outPreset, std::string* error) +{ + if (authored.hasAbsolutePreset) + { + outPreset = authored.absolutePreset; + if (outPreset.identifier.empty()) + outPreset.identifier = reference.identifier; + if (outPreset.name.empty()) + outPreset.name = reference.name; + return CCameraGoalUtilities::isGoalFinite(CCameraPresetUtilities::makeGoalFromPreset(outPreset)); + } + + outPreset = reference; + if (!authored.hasDelta) + return true; + + auto goal = CCameraPresetUtilities::makeGoalFromPreset(reference); + const auto& delta = authored.delta; + + const bool hasPoseDelta = delta.hasPositionOffset || delta.hasRotationEulerDegOffset; + const bool hasSphericalDelta = delta.hasTargetOffset || delta.orbitDelta.hasAny(); + const bool hasPathDelta = delta.pathDelta.hasAny(); + + if (hasPoseDelta && (hasSphericalDelta || hasPathDelta)) + { + if (error) + *error = "Sequence keyframe delta cannot mix pose offsets with spherical/path deltas."; + return false; + } + + if (delta.hasPositionOffset) + goal.position += delta.positionOffset; + + if (delta.hasRotationEulerDegOffset) + { + goal.orientation = hlsl::CCameraMathUtilities::normalizeQuaternion( + goal.orientation * hlsl::CCameraMathUtilities::makeQuaternionFromEulerDegreesYXZ( + hlsl::CCameraMathUtilities::castVector(delta.rotationEulerDegOffset))); + } + + if (delta.hasTargetOffset) + { + if (!goal.hasTargetPosition) + { + if (error) + *error = "Sequence keyframe target_offset requires target state."; + return false; + } + goal.targetPosition += delta.targetOffset; + } + + if (delta.orbitDelta.hasAny()) + { + if (!goal.hasOrbitState) + { + if (error) + *error = "Sequence keyframe orbit deltas require spherical orbit state."; + return false; + } + + if (delta.orbitDelta.hasU) + goal.orbitUv.x = 
hlsl::CCameraMathUtilities::wrapAngleRad(goal.orbitUv.x + delta.orbitDelta.uvDeltaRad.x); + if (delta.orbitDelta.hasV) + { + goal.orbitUv.y = std::clamp( + goal.orbitUv.y + delta.orbitDelta.uvDeltaRad.y, + -SCameraTargetRelativeRigDefaults::ArcballPitchLimitRad, + SCameraTargetRelativeRigDefaults::ArcballPitchLimitRad); + } + if (delta.orbitDelta.hasDistance) + goal.orbitDistance += delta.orbitDelta.distanceDelta; + } + + if (delta.pathDelta.hasAny()) + { + if (!goal.hasPathState) + { + if (error) + *error = "Sequence keyframe path deltas require path state."; + return false; + } + + if (!CCameraPathUtilities::tryApplyPathStateDelta( + goal.pathState, + delta.pathDelta.buildAppliedDelta(), + CCameraPathUtilities::makeDefaultPathLimits(), + goal.pathState)) + { + if (error) + *error = "Sequence keyframe path deltas produced an invalid path state."; + return false; + } + } + + if (delta.hasDynamicBaseFovDelta || delta.hasDynamicReferenceDistanceDelta) + { + if (!goal.hasDynamicPerspectiveState) + { + if (error) + *error = "Sequence keyframe dynamic perspective deltas require dynamic perspective state."; + return false; + } + if (delta.hasDynamicBaseFovDelta) + goal.dynamicPerspectiveState.baseFov = std::clamp(goal.dynamicPerspectiveState.baseFov + delta.dynamicBaseFovDelta, 1.f, 179.f); + if (delta.hasDynamicReferenceDistanceDelta) + { + goal.dynamicPerspectiveState.referenceDistance = std::max( + 0.001f, + goal.dynamicPerspectiveState.referenceDistance + delta.dynamicReferenceDistanceDelta); + } + } + + if (hasPathDelta || hasSphericalDelta) + { + if (!CCameraGoalUtilities::applyCanonicalGoalState(goal)) + { + if (error) + { + *error = hasPathDelta ? + "Sequence keyframe failed to canonicalize path state." 
: + "Sequence keyframe failed to canonicalize spherical state."; + } + return false; + } + } + + if (!CCameraGoalUtilities::isGoalFinite(goal)) + { + if (error) + *error = "Sequence keyframe produced a non-finite goal."; + return false; + } + + CCameraPresetUtilities::assignGoalToPreset(outPreset, goal); + return true; +} + +bool CCameraSequenceScriptUtilities::buildSequenceTrackFromReference(const CCameraPreset& reference, const CCameraSequenceSegment& segment, CCameraKeyframeTrack& outTrack, std::string* error) +{ + outTrack = {}; + outTrack.keyframes.reserve(segment.keyframes.size()); + + for (const auto& entry : segment.keyframes) + { + CCameraKeyframe keyframe; + keyframe.time = std::max(0.f, entry.time); + if (!buildSequenceKeyframePreset(reference, entry, keyframe.preset, error)) + return false; + outTrack.keyframes.emplace_back(std::move(keyframe)); + } + + CCameraKeyframeTrackUtilities::sortKeyframeTrackByTime(outTrack); + CCameraKeyframeTrackUtilities::normalizeSelectedKeyframeTrack(outTrack); + return !outTrack.keyframes.empty(); +} + +bool CCameraSequenceScriptUtilities::isSequenceTrackedTargetPoseFinite(const CCameraSequenceTrackedTargetPose& pose) +{ + return hlsl::CCameraMathUtilities::isFiniteVec3(pose.position) && + hlsl::CCameraMathUtilities::isFiniteQuaternion(pose.orientation); +} + +bool CCameraSequenceScriptUtilities::buildSequenceTrackedTargetPoseFromReference( + const CCameraSequenceTrackedTargetPose& reference, + const CCameraSequenceTrackedTargetKeyframe& authored, + CCameraSequenceTrackedTargetPose& outPose, + std::string* error) +{ + outPose = reference; + + if (authored.hasAbsolutePosition) + outPose.position = authored.absolutePosition; + if (authored.hasAbsoluteRotationEulerDeg) + { + outPose.orientation = hlsl::CCameraMathUtilities::makeQuaternionFromEulerDegreesYXZ( + hlsl::CCameraMathUtilities::castVector(authored.absoluteRotationEulerDeg)); + } + + if (authored.hasDelta) + { + if (authored.delta.hasPositionOffset) + 
outPose.position += authored.delta.positionOffset; + if (authored.delta.hasRotationEulerDegOffset) + { + outPose.orientation = hlsl::CCameraMathUtilities::normalizeQuaternion( + outPose.orientation * hlsl::CCameraMathUtilities::makeQuaternionFromEulerDegreesYXZ( + hlsl::CCameraMathUtilities::castVector(authored.delta.rotationEulerDegOffset))); + } + } + + if (!isSequenceTrackedTargetPoseFinite(outPose)) + { + if (error) + *error = "Sequence target keyframe produced a non-finite pose."; + return false; + } + + return true; +} + +bool CCameraSequenceScriptUtilities::buildSequenceTrackedTargetTrackFromReference( + const CCameraSequenceTrackedTargetPose& reference, + const CCameraSequenceSegment& segment, + CCameraSequenceTrackedTargetTrack& outTrack, + std::string* error) +{ + outTrack = {}; + outTrack.keyframes.reserve(segment.targetKeyframes.size()); + + for (const auto& entry : segment.targetKeyframes) + { + CCameraSequenceTrackedTargetTrack::SKeyframe keyframe; + keyframe.time = std::max(0.f, entry.time); + if (!buildSequenceTrackedTargetPoseFromReference(reference, entry, keyframe.pose, error)) + return false; + outTrack.keyframes.emplace_back(std::move(keyframe)); + } + + std::stable_sort( + outTrack.keyframes.begin(), + outTrack.keyframes.end(), + [](const auto& lhs, const auto& rhs) + { + if (lhs.time == rhs.time) + return false; + return lhs.time < rhs.time; + }); + + std::vector normalized; + normalized.reserve(outTrack.keyframes.size()); + for (const auto& keyframe : outTrack.keyframes) + { + if (!normalized.empty() && + hlsl::CCameraMathUtilities::nearlyEqualScalar( + normalized.back().time, + keyframe.time, + static_cast(SCameraToolingThresholds::ScalarTolerance))) + { + normalized.back() = keyframe; + } + else + { + normalized.emplace_back(keyframe); + } + } + outTrack.keyframes = std::move(normalized); + + return !outTrack.keyframes.empty(); +} + +bool CCameraSequenceScriptUtilities::tryBuildSequenceTrackedTargetPoseAtTime( + const 
CCameraSequenceTrackedTargetTrack& track, + const float time, + CCameraSequenceTrackedTargetPose& outPose) +{ + if (track.keyframes.empty()) + return false; + if (track.keyframes.size() == 1u || time <= track.keyframes.front().time) + { + outPose = track.keyframes.front().pose; + return true; + } + if (time >= track.keyframes.back().time) + { + outPose = track.keyframes.back().pose; + return true; + } + + for (size_t ix = 1u; ix < track.keyframes.size(); ++ix) + { + const auto& lhs = track.keyframes[ix - 1u]; + const auto& rhs = track.keyframes[ix]; + if (time > rhs.time) + continue; + + const auto span = std::max(static_cast(SCameraToolingThresholds::ScalarTolerance), rhs.time - lhs.time); + const auto alpha = std::clamp((time - lhs.time) / span, 0.f, 1.f); + outPose.position = lhs.pose.position + (rhs.pose.position - lhs.pose.position) * static_cast(alpha); + outPose.orientation = hlsl::CCameraMathUtilities::slerpQuaternion(lhs.pose.orientation, rhs.pose.orientation, static_cast(alpha)); + return true; + } + + outPose = track.keyframes.back().pose; + return true; +} + +bool CCameraSequenceScriptUtilities::sequenceSegmentUsesTrackedTargetTrack(const CCameraSequenceSegment& segment) +{ + return !segment.targetKeyframes.empty(); +} + +float CCameraSequenceScriptUtilities::getSequenceSegmentDurationSeconds(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment, const CCameraKeyframeTrack* track) +{ + if (segment.hasDurationSeconds) + return std::max(0.f, segment.durationSeconds); + if (script.defaults.durationSeconds > 0.f) + return script.defaults.durationSeconds; + if (track) + return track->keyframes.empty() ? 0.f : track->keyframes.back().time; + return 0.f; +} + +const std::vector& CCameraSequenceScriptUtilities::getSequenceSegmentPresentations(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment) +{ + return segment.presentations.empty() ? 
script.defaults.presentations : segment.presentations; +} + +CCameraSequenceContinuitySettings CCameraSequenceScriptUtilities::getSequenceSegmentContinuity(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment) +{ + return segment.hasContinuity ? segment.continuity : script.defaults.continuity; +} + +std::vector CCameraSequenceScriptUtilities::getSequenceSegmentCaptureFractions(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment) +{ + auto captures = segment.hasCaptureFractions ? segment.captureFractions : script.defaults.captureFractions; + normalizeCaptureFractions(captures); + return captures; +} + +bool CCameraSequenceScriptUtilities::getSequenceSegmentResetCamera(const CCameraSequenceScript& script, const CCameraSequenceSegment& segment) +{ + return segment.hasResetCamera ? segment.resetCamera : script.defaults.resetCamera; +} + +bool CCameraSequenceScriptUtilities::sequenceScriptUsesMultiplePresentations(const CCameraSequenceScript& script) +{ + if (script.defaults.presentations.size() > 1u) + return true; + + for (const auto& segment : script.segments) + { + if (getSequenceSegmentPresentations(script, segment).size() > 1u) + return true; + } + + return false; +} + +uint64_t CCameraSequenceScriptUtilities::buildSequenceDurationFrames(const float durationSeconds, const float fps) +{ + const auto safeDuration = std::max(0.f, durationSeconds); + const auto safeFps = std::max(1.f, fps); + return std::max(1ull, static_cast(std::llround(static_cast(safeDuration) * static_cast(safeFps)))); +} + +void CCameraSequenceScriptUtilities::buildSequenceSampleTimes(const float durationSeconds, const uint64_t durationFrames, std::vector& outTimes) +{ + outTimes.clear(); + outTimes.reserve(durationFrames); + + for (uint64_t frameOffset = 0u; frameOffset < durationFrames; ++frameOffset) + { + const float alpha = durationFrames > 1u ? 
static_cast(frameOffset) / static_cast(durationFrames - 1u) : 0.f; + outTimes.emplace_back(durationSeconds * alpha); + } +} + +void CCameraSequenceScriptUtilities::buildSequenceCaptureFrameOffsets( + const uint64_t durationFrames, + const std::vector& captureFractions, + std::vector& outOffsets) +{ + outOffsets.clear(); + outOffsets.reserve(captureFractions.size()); + + for (const auto fraction : captureFractions) + { + const auto offset = durationFrames > 1u ? + static_cast(std::llround(static_cast(fraction) * static_cast(durationFrames - 1u))) : + 0ull; + outOffsets.emplace_back(offset); + } + + std::sort(outOffsets.begin(), outOffsets.end()); + outOffsets.erase(std::unique(outOffsets.begin(), outOffsets.end()), outOffsets.end()); +} + +bool CCameraSequenceScriptUtilities::compileSequenceSegmentFromReference( + const CCameraSequenceScript& script, + const CCameraSequenceSegment& segment, + const CCameraPreset& referencePreset, + const CCameraSequenceTrackedTargetPose& referenceTrackedTargetPose, + CCameraSequenceCompiledSegment& outSegment, + std::string* error) +{ + outSegment = {}; + outSegment.name = segment.name; + outSegment.presentations = getSequenceSegmentPresentations(script, segment); + outSegment.continuity = getSequenceSegmentContinuity(script, segment); + outSegment.resetCamera = getSequenceSegmentResetCamera(script, segment); + + if (!buildSequenceTrackFromReference(referencePreset, segment, outSegment.track, error)) + return false; + + if (sequenceSegmentUsesTrackedTargetTrack(segment) && + !buildSequenceTrackedTargetTrackFromReference(referenceTrackedTargetPose, segment, outSegment.trackedTargetTrack, error)) + { + return false; + } + + outSegment.durationSeconds = getSequenceSegmentDurationSeconds(script, segment, &outSegment.track); + outSegment.durationFrames = buildSequenceDurationFrames(outSegment.durationSeconds, script.fps); + buildSequenceSampleTimes(outSegment.durationSeconds, outSegment.durationFrames, outSegment.sampleTimes); + 
buildSequenceCaptureFrameOffsets(outSegment.durationFrames, getSequenceSegmentCaptureFractions(script, segment), outSegment.captureFrameOffsets); + return true; +} + +bool CCameraSequenceScriptUtilities::buildCompiledSegmentFramePolicies( + const CCameraSequenceCompiledSegment& segment, + std::vector& outPolicies, + const bool includeFollowTargetLock) +{ + if (segment.sampleTimes.size() != segment.durationFrames) + return false; + + outPolicies.clear(); + outPolicies.reserve(segment.durationFrames); + + size_t captureIx = 0u; + for (uint64_t frameOffset = 0u; frameOffset < segment.durationFrames; ++frameOffset) + { + CCameraSequenceCompiledFramePolicy policy; + policy.frameOffset = frameOffset; + policy.sampleTime = segment.sampleTimes[frameOffset]; + policy.baseline = segment.continuity.baseline && frameOffset == 0u; + policy.continuityStep = segment.continuity.step && frameOffset > 0u; + policy.followTargetLock = includeFollowTargetLock && segment.usesTrackedTargetTrack() && policy.continuityStep; + + while (captureIx < segment.captureFrameOffsets.size() && segment.captureFrameOffsets[captureIx] < frameOffset) + ++captureIx; + policy.capture = captureIx < segment.captureFrameOffsets.size() && segment.captureFrameOffsets[captureIx] == frameOffset; + if (policy.capture) + ++captureIx; + + outPolicies.emplace_back(std::move(policy)); + } + + return true; +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/CCameraSequenceScriptPersistence.cpp b/src/nbl/ext/Cameras/CCameraSequenceScriptPersistence.cpp new file mode 100644 index 0000000000..5d01fdc118 --- /dev/null +++ b/src/nbl/ext/Cameras/CCameraSequenceScriptPersistence.cpp @@ -0,0 +1,558 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/CCameraSequenceScriptPersistence.hpp" + +#include +#include +#include +#include + +#include "CCameraJsonPersistenceUtilities.hpp" +#include "nbl/ext/Cameras/CCameraFileUtilities.hpp" +#include "nlohmann/json.hpp" + +using json_t = nlohmann::json; + +namespace nbl::system +{ + +namespace impl +{ + +struct CCameraSequenceScriptJsonUtilities final +{ +static bool tryParseCaptureFractionJson(const json_t& entry, float& outFraction) +{ + if (entry.is_number()) + { + outFraction = std::clamp(entry.get(), 0.f, 1.f); + return true; + } + + if (!entry.is_string()) + return false; + + const auto tag = entry.get(); + if (tag == "start") + outFraction = 0.f; + else if (tag == "mid" || tag == "middle") + outFraction = 0.5f; + else if (tag == "end") + outFraction = 1.f; + else + return false; + + return true; +} + +template +static void readVector3(const json_t& entry, T& outValue) +{ + using scalar_t = std::remove_reference_t; + const auto values = entry.get>(); + outValue = T(values[0], values[1], values[2]); +} + +static bool deserializeSequencePresentationsJson(const json_t& root, std::vector& out, std::string* error) +{ + out.clear(); + if (!root.is_array()) + { + if (error) + *error = "Sequence presentations must be an array."; + return false; + } + + for (const auto& entry : root) + { + if (!entry.is_object() || !entry.contains("projection")) + { + if (error) + *error = "Sequence presentation entry missing \"projection\"."; + return false; + } + + nbl::core::CCameraSequencePresentation presentation; + if (!nbl::core::CCameraSequenceScriptUtilities::tryParseProjectionType(entry["projection"].get(), presentation.projection)) + { + if (error) + *error = "Sequence presentation has invalid projection type."; + return false; + } + if (entry.contains("left_handed")) + presentation.leftHanded = entry["left_handed"].get(); + out.emplace_back(presentation); + } + + return true; 
+} + +static bool deserializeSequenceContinuityJson(const json_t& root, nbl::core::CCameraSequenceContinuitySettings& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence continuity settings must be an object."; + return false; + } + + out = {}; + if (root.contains("baseline")) + out.baseline = root["baseline"].get(); + if (root.contains("step")) + out.step = root["step"].get(); + + if (root.contains("min_pos_delta")) + { + out.minPosDelta = root["min_pos_delta"].get(); + out.hasPosDeltaConstraint = true; + } + if (root.contains("max_pos_delta")) + { + out.maxPosDelta = root["max_pos_delta"].get(); + out.hasPosDeltaConstraint = true; + } + else if (root.contains("pos_tolerance")) + { + out.maxPosDelta = root["pos_tolerance"].get(); + out.hasPosDeltaConstraint = true; + } + + if (root.contains("min_euler_delta_deg")) + { + out.minEulerDeltaDeg = root["min_euler_delta_deg"].get(); + out.hasEulerDeltaConstraint = true; + } + if (root.contains("max_euler_delta_deg")) + { + out.maxEulerDeltaDeg = root["max_euler_delta_deg"].get(); + out.hasEulerDeltaConstraint = true; + } + else if (root.contains("euler_tolerance_deg")) + { + out.maxEulerDeltaDeg = root["euler_tolerance_deg"].get(); + out.hasEulerDeltaConstraint = true; + } + + if (root.contains("disable_pos_delta")) + out.hasPosDeltaConstraint = !root["disable_pos_delta"].get(); + if (root.contains("disable_euler_delta")) + out.hasEulerDeltaConstraint = !root["disable_euler_delta"].get(); + + if (out.step && !(out.hasPosDeltaConstraint || out.hasEulerDeltaConstraint)) + { + if (error) + *error = "Sequence continuity step checks require at least one delta constraint."; + return false; + } + + return true; +} + +static bool deserializeSequenceGoalDeltaJson(const json_t& root, nbl::core::CCameraSequenceGoalDelta& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence keyframe delta must be an object."; + return false; + } + + out = {}; + if 
(root.contains("position_offset")) + { + readVector3(root["position_offset"], out.positionOffset); + out.hasPositionOffset = true; + } + if (root.contains("rotation_euler_deg_offset")) + { + readVector3(root["rotation_euler_deg_offset"], out.rotationEulerDegOffset); + out.hasRotationEulerDegOffset = true; + } + if (root.contains("target_offset")) + { + readVector3(root["target_offset"], out.targetOffset); + out.hasTargetOffset = true; + } + if (root.contains("orbit_u_delta_deg")) + out.orbitDelta.setUDeltaDeg(root["orbit_u_delta_deg"].get()); + if (root.contains("orbit_v_delta_deg")) + out.orbitDelta.setVDeltaDeg(root["orbit_v_delta_deg"].get()); + if (root.contains("orbit_distance_delta")) + out.orbitDelta.setDistanceDelta(root["orbit_distance_delta"].get()); + if (root.contains("path_s_delta_deg")) + out.pathDelta.setSDeltaDeg(root["path_s_delta_deg"].get()); + if (root.contains("path_u_delta")) + out.pathDelta.setUDelta(root["path_u_delta"].get()); + if (root.contains("path_v_delta")) + out.pathDelta.setVDelta(root["path_v_delta"].get()); + if (root.contains("path_roll_delta_deg")) + out.pathDelta.setRollDeltaDeg(root["path_roll_delta_deg"].get()); + if (root.contains("dynamic_base_fov_delta")) + { + out.dynamicBaseFovDelta = root["dynamic_base_fov_delta"].get(); + out.hasDynamicBaseFovDelta = true; + } + if (root.contains("dynamic_reference_distance_delta")) + { + out.dynamicReferenceDistanceDelta = root["dynamic_reference_distance_delta"].get(); + out.hasDynamicReferenceDistanceDelta = true; + } + + return true; +} + +static bool deserializeSequenceKeyframeJson(const json_t& root, nbl::core::CCameraSequenceKeyframe& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence keyframe must be an object."; + return false; + } + + out = {}; + if (root.contains("time")) + out.time = std::max(0.f, root["time"].get()); + + if (root.contains("delta")) + { + if (!deserializeSequenceGoalDeltaJson(root["delta"], out.delta, error)) + return 
false; + out.hasDelta = true; + } + + if (root.contains("preset")) + { + impl::CCameraJsonPersistenceUtilities::deserializePresetJson(root["preset"], out.absolutePreset); + out.hasAbsolutePreset = true; + } + else if (root.contains("position") || root.contains("orientation") || root.contains("target_position") || + root.contains("distance") || root.contains("orbit_u") || root.contains("orbit_v") || + root.contains("orbit_distance") || root.contains("path_s") || root.contains("path_u") || + root.contains("path_v") || root.contains("path_roll") || + root.contains("dynamic_base_fov") || root.contains("dynamic_reference_distance")) + { + impl::CCameraJsonPersistenceUtilities::deserializePresetJson(root, out.absolutePreset); + out.hasAbsolutePreset = true; + } + + return true; +} + +static bool deserializeSequenceTrackedTargetDeltaJson(const json_t& root, nbl::core::CCameraSequenceTrackedTargetDelta& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence target delta must be an object."; + return false; + } + + out = {}; + if (root.contains("position_offset")) + { + readVector3(root["position_offset"], out.positionOffset); + out.hasPositionOffset = true; + } + if (root.contains("rotation_euler_deg_offset")) + { + readVector3(root["rotation_euler_deg_offset"], out.rotationEulerDegOffset); + out.hasRotationEulerDegOffset = true; + } + + return true; +} + +static bool deserializeSequenceTrackedTargetKeyframeJson(const json_t& root, nbl::core::CCameraSequenceTrackedTargetKeyframe& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence target keyframe must be an object."; + return false; + } + + out = {}; + if (root.contains("time")) + out.time = std::max(0.f, root["time"].get()); + + if (root.contains("delta")) + { + if (!deserializeSequenceTrackedTargetDeltaJson(root["delta"], out.delta, error)) + return false; + out.hasDelta = true; + } + + if (root.contains("position")) + { + 
readVector3(root["position"], out.absolutePosition); + out.hasAbsolutePosition = true; + } + if (root.contains("rotation_euler_deg")) + { + readVector3(root["rotation_euler_deg"], out.absoluteRotationEulerDeg); + out.hasAbsoluteRotationEulerDeg = true; + } + + return true; +} + +static bool deserializeSequenceSegmentJson(const json_t& root, nbl::core::CCameraSequenceSegment& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Sequence segment must be an object."; + return false; + } + + out = {}; + if (root.contains("name")) + out.name = root["name"].get(); + if (root.contains("camera_identifier")) + out.cameraIdentifier = root["camera_identifier"].get(); + if (root.contains("camera_kind")) + { + if (!nbl::core::CCameraSequenceScriptUtilities::tryParseCameraKind(root["camera_kind"].get(), out.cameraKind)) + { + if (error) + *error = "Sequence segment has invalid camera_kind."; + return false; + } + } + if (root.contains("duration_seconds")) + { + out.durationSeconds = std::max(0.f, root["duration_seconds"].get()); + out.hasDurationSeconds = true; + } + if (root.contains("reset_camera")) + { + out.resetCamera = root["reset_camera"].get(); + out.hasResetCamera = true; + } + if (root.contains("presentations")) + { + if (!deserializeSequencePresentationsJson(root["presentations"], out.presentations, error)) + return false; + } + if (root.contains("continuity")) + { + if (!deserializeSequenceContinuityJson(root["continuity"], out.continuity, error)) + return false; + out.hasContinuity = true; + } + if (root.contains("captures")) + { + if (!root["captures"].is_array()) + { + if (error) + *error = "Sequence segment captures must be an array."; + return false; + } + + out.captureFractions.clear(); + for (const auto& entry : root["captures"]) + { + float fraction = 0.f; + if (!tryParseCaptureFractionJson(entry, fraction)) + { + if (error) + *error = "Sequence segment capture entry is invalid."; + return false; + } + 
out.captureFractions.emplace_back(fraction); + } + nbl::core::CCameraSequenceScriptUtilities::normalizeCaptureFractions(out.captureFractions); + out.hasCaptureFractions = true; + } + if (root.contains("keyframes")) + { + if (!root["keyframes"].is_array()) + { + if (error) + *error = "Sequence segment keyframes must be an array."; + return false; + } + for (const auto& entry : root["keyframes"]) + { + nbl::core::CCameraSequenceKeyframe keyframe; + if (!deserializeSequenceKeyframeJson(entry, keyframe, error)) + return false; + out.keyframes.emplace_back(std::move(keyframe)); + } + } + if (root.contains("target_keyframes")) + { + if (!root["target_keyframes"].is_array()) + { + if (error) + *error = "Sequence segment target_keyframes must be an array."; + return false; + } + for (const auto& entry : root["target_keyframes"]) + { + nbl::core::CCameraSequenceTrackedTargetKeyframe keyframe; + if (!deserializeSequenceTrackedTargetKeyframeJson(entry, keyframe, error)) + return false; + out.targetKeyframes.emplace_back(std::move(keyframe)); + } + } + + if (out.keyframes.empty()) + { + if (error) + *error = "Sequence segment requires at least one keyframe."; + return false; + } + if (out.cameraKind == nbl::core::ICamera::CameraKind::Unknown && out.cameraIdentifier.empty()) + { + if (error) + *error = "Sequence segment requires camera_kind or camera_identifier."; + return false; + } + + return true; +} + +static bool deserializeCameraSequenceScriptJson(const json_t& root, nbl::core::CCameraSequenceScript& out, std::string* error) +{ + if (!root.is_object()) + { + if (error) + *error = "Camera sequence script must be an object."; + return false; + } + + out = {}; + if (root.contains("enabled")) + out.enabled = root["enabled"].get(); + if (root.contains("log")) + out.log = root["log"].get(); + if (root.contains("exclusive")) + out.exclusive = root["exclusive"].get(); + if (root.contains("exclusive_input")) + out.exclusive = root["exclusive_input"].get() || out.exclusive; + if 
(root.contains("hard_fail")) + out.hardFail = root["hard_fail"].get(); + if (root.contains("visual_debug")) + out.visualDebug = root["visual_debug"].get(); + if (root.contains("visual_debug_target_fps")) + out.visualDebugTargetFps = root["visual_debug_target_fps"].get(); + if (root.contains("visual_debug_hold_seconds")) + out.visualDebugHoldSeconds = root["visual_debug_hold_seconds"].get(); + if (root.contains("enableActiveCameraMovement")) + { + out.enableActiveCameraMovement = root["enableActiveCameraMovement"].get(); + out.hasEnableActiveCameraMovement = true; + } + if (root.contains("capture_prefix")) + out.capturePrefix = root["capture_prefix"].get(); + if (root.contains("fps")) + out.fps = std::max(1.f, root["fps"].get()); + + if (root.contains("defaults")) + { + const auto& defaults = root["defaults"]; + if (!defaults.is_object()) + { + if (error) + *error = "Camera sequence defaults must be an object."; + return false; + } + + if (defaults.contains("duration_seconds")) + out.defaults.durationSeconds = std::max(0.f, defaults["duration_seconds"].get()); + if (defaults.contains("reset_camera")) + out.defaults.resetCamera = defaults["reset_camera"].get(); + if (defaults.contains("presentations")) + { + if (!deserializeSequencePresentationsJson(defaults["presentations"], out.defaults.presentations, error)) + return false; + } + if (defaults.contains("continuity")) + { + if (!deserializeSequenceContinuityJson(defaults["continuity"], out.defaults.continuity, error)) + return false; + } + if (defaults.contains("captures")) + { + if (!defaults["captures"].is_array()) + { + if (error) + *error = "Camera sequence default captures must be an array."; + return false; + } + + out.defaults.captureFractions.clear(); + for (const auto& entry : defaults["captures"]) + { + float fraction = 0.f; + if (!tryParseCaptureFractionJson(entry, fraction)) + { + if (error) + *error = "Camera sequence default capture entry is invalid."; + return false; + } + 
out.defaults.captureFractions.emplace_back(fraction); + } + nbl::core::CCameraSequenceScriptUtilities::normalizeCaptureFractions(out.defaults.captureFractions); + } + } + + if (!root.contains("segments") || !root["segments"].is_array()) + { + if (error) + *error = "Camera sequence script requires a \"segments\" array."; + return false; + } + + for (const auto& entry : root["segments"]) + { + nbl::core::CCameraSequenceSegment segment; + if (!deserializeSequenceSegmentJson(entry, segment, error)) + return false; + out.segments.emplace_back(std::move(segment)); + } + + if (out.segments.empty()) + { + if (error) + *error = "Camera sequence script must contain at least one segment."; + return false; + } + + return true; +} + +}; // struct CCameraSequenceScriptJsonUtilities + +} // namespace impl + +bool CCameraSequenceScriptPersistenceUtilities::deserializeCameraSequenceScript(std::string_view text, core::CCameraSequenceScript& out, std::string* error) +{ + try + { + const auto root = json_t::parse(text); + return impl::CCameraSequenceScriptJsonUtilities::deserializeCameraSequenceScriptJson(root, out, error); + } + catch (const json_t::exception& e) + { + if (error) + *error = e.what(); + return false; + } +} + +bool CCameraSequenceScriptPersistenceUtilities::loadCameraSequenceScriptFromFile(ISystem& system, const path& filePath, core::CCameraSequenceScript& out, std::string* error) +{ + std::string text; + if (!CCameraFileUtilities::readTextFile(system, filePath, text, error, "Cannot open camera sequence script file.")) + return false; + + return deserializeCameraSequenceScript(text, out, error); +} + +} // namespace nbl::system diff --git a/src/nbl/ext/Cameras/CGimbalInputBinder.cpp b/src/nbl/ext/Cameras/CGimbalInputBinder.cpp new file mode 100644 index 0000000000..89f7e55bde --- /dev/null +++ b/src/nbl/ext/Cameras/CGimbalInputBinder.cpp @@ -0,0 +1,99 @@ +#include "nbl/macros.h" + +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif + +#include 
"nbl/ext/Cameras/CGimbalInputBinder.hpp" + +namespace nbl::ui +{ + +uint32_t CGimbalInputBinder::SCollectedVirtualEvents::totalCount() const +{ + return keyboardCount + mouseCount + imguizmoCount; +} + +void CGimbalInputBinder::clearActiveBindings() +{ + updateKeyboardMapping([](auto& map) { map.clear(); }); + updateMouseMapping([](auto& map) { map.clear(); }); + updateImguizmoMapping([](auto& map) { map.clear(); }); +} + +void CGimbalInputBinder::clearBindingLayout() +{ + clearActiveBindings(); +} + +void CGimbalInputBinder::copyActiveBindingsFromLayout(const IGimbalBindingLayout& layout) +{ + updateKeyboardMapping([&](auto& map) { map = sanitizeMapping(layout.getKeyboardVirtualEventMap()); }); + updateMouseMapping([&](auto& map) { map = sanitizeMapping(layout.getMouseVirtualEventMap()); }); + updateImguizmoMapping([&](auto& map) { map = sanitizeMapping(layout.getImguizmoVirtualEventMap()); }); +} + +void CGimbalInputBinder::copyBindingLayoutFrom(const IGimbalBindingLayout& layout) +{ + copyActiveBindingsFromLayout(layout); +} + +void CGimbalInputBinder::copyActiveBindingsToLayout(IGimbalBindingLayout& layout) const +{ + layout.updateKeyboardMapping([&](auto& map) { map = sanitizeMapping(getKeyboardVirtualEventMap()); }); + layout.updateMouseMapping([&](auto& map) { map = sanitizeMapping(getMouseVirtualEventMap()); }); + layout.updateImguizmoMapping([&](auto& map) { map = sanitizeMapping(getImguizmoVirtualEventMap()); }); +} + +void CGimbalInputBinder::copyBindingLayoutTo(IGimbalBindingLayout& layout) const +{ + copyActiveBindingsToLayout(layout); +} + +CGimbalInputBinder::SCollectedVirtualEvents CGimbalInputBinder::collectVirtualEvents( + const std::chrono::microseconds nextPresentationTimeStamp, + const SUpdateParameters parameters) +{ + beginInputProcessing(nextPresentationTimeStamp); + + SCollectedVirtualEvents output; + uint32_t keyboardPotentialCount = 0u; + uint32_t mousePotentialCount = 0u; + uint32_t imguizmoPotentialCount = 0u; + + 
processKeyboard(nullptr, keyboardPotentialCount, {}); + processMouse(nullptr, mousePotentialCount, {}); + processImguizmo(nullptr, imguizmoPotentialCount, {}); + + output.events.resize(keyboardPotentialCount + mousePotentialCount + imguizmoPotentialCount); + auto* dst = output.events.data(); + + if (keyboardPotentialCount) + { + output.keyboardCount = keyboardPotentialCount; + processKeyboard(dst, output.keyboardCount, parameters.keyboardEvents); + dst += output.keyboardCount; + } + + if (mousePotentialCount) + { + output.mouseCount = mousePotentialCount; + processMouse(dst, output.mouseCount, parameters.mouseEvents); + dst += output.mouseCount; + } + + if (imguizmoPotentialCount) + { + output.imguizmoCount = imguizmoPotentialCount; + processImguizmo(dst, output.imguizmoCount, parameters.imguizmoEvents); + } + + endInputProcessing(); + output.events.resize(output.totalCount()); + return output; +} + +} // namespace nbl::ui diff --git a/src/nbl/ext/Cameras/CMakeLists.txt b/src/nbl/ext/Cameras/CMakeLists.txt new file mode 100644 index 0000000000..0af31024ec --- /dev/null +++ b/src/nbl/ext/Cameras/CMakeLists.txt @@ -0,0 +1,31 @@ +include(common) + +file(GLOB NBL_EXT_CAMERAS_HEADERS CONFIGURE_DEPENDS + "${NBL_ROOT_PATH}/include/nbl/ext/Cameras/*.hpp" +) + +file(GLOB NBL_EXT_CAMERAS_SOURCES CONFIGURE_DEPENDS + "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" +) + +file(GLOB NBL_EXT_CAMERAS_LOCAL_HEADERS CONFIGURE_DEPENDS + "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp" +) + +set_source_files_properties(${NBL_EXT_CAMERAS_LOCAL_HEADERS} PROPERTIES HEADER_FILE_ONLY ON) + +nbl_create_ext_library_project( + Cameras + "${NBL_EXT_CAMERAS_HEADERS}" + "${NBL_EXT_CAMERAS_SOURCES};${NBL_EXT_CAMERAS_LOCAL_HEADERS}" + "" + "" + "" +) + +nbl_install_file_spec( + "${NBL_ROOT_PATH}/include/nbl/ext/Cameras/README.md" + "nbl/ext/Cameras" +) + +add_library(Nabla::ext::Cameras ALIAS ${LIB_NAME}) diff --git a/src/nbl/ext/Cameras/IGimbalInputProcessor.cpp b/src/nbl/ext/Cameras/IGimbalInputProcessor.cpp new file 
mode 100644 index 0000000000..8c4b6df0d3 --- /dev/null +++ b/src/nbl/ext/Cameras/IGimbalInputProcessor.cpp @@ -0,0 +1,179 @@ +#include "nbl/macros.h" + +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif + +#include "nbl/ext/Cameras/IGimbalInputProcessor.hpp" + +#include + +#include "nbl/ext/Cameras/CCameraMathUtilities.hpp" + +namespace nbl::ui +{ + +void IGimbalInputProcessor::beginInputProcessing(const std::chrono::microseconds nextPresentationTimeStamp) +{ + m_nextPresentationTimeStamp = nextPresentationTimeStamp; + m_frameDeltaSeconds = clampFrameDeltaTimeSeconds(m_nextPresentationTimeStamp, m_lastVirtualUpTimeStamp); +} + +void IGimbalInputProcessor::endInputProcessing() +{ + m_lastVirtualUpTimeStamp = m_nextPresentationTimeStamp; +} + +void IGimbalInputProcessor::process(gimbal_event_t* output, uint32_t& count, const SUpdateParameters parameters) +{ + count = 0u; + uint32_t vKeyboardEventsCount = {}, vMouseEventsCount = {}, vImguizmoEventsCount = {}; + + if (output) + { + processKeyboard(output, vKeyboardEventsCount, parameters.keyboardEvents); + output += vKeyboardEventsCount; + processMouse(output, vMouseEventsCount, parameters.mouseEvents); + output += vMouseEventsCount; + processImguizmo(output, vImguizmoEventsCount, parameters.imguizmoEvents); + } + else + { + processKeyboard(nullptr, vKeyboardEventsCount, {}); + processMouse(nullptr, vMouseEventsCount, {}); + processImguizmo(nullptr, vImguizmoEventsCount, {}); + } + + count = vKeyboardEventsCount + vMouseEventsCount + vImguizmoEventsCount; +} + +void IGimbalInputProcessor::processKeyboard(gimbal_event_t* output, uint32_t& count, std::span events) +{ + processBindingMap( + m_keyboardVirtualEventMap, + output, + count, + [&](auto& map) + { + for (const auto& keyboardEvent : events) + { + if (keyboardEvent.action == input_keyboard_event_t::ECA_PRESSED) + setBindingActiveState(map, keyboardEvent.keyCode, true); + else if (keyboardEvent.action == input_keyboard_event_t::ECA_RELEASED) + 
setBindingActiveState(map, keyboardEvent.keyCode, false); + } + }); +} + +void IGimbalInputProcessor::processMouse(gimbal_event_t* output, uint32_t& count, std::span events) +{ + processBindingMap( + m_mouseVirtualEventMap, + output, + count, + [&](auto& map) + { + for (const auto& mouseEvent : events) + { + switch (mouseEvent.type) + { + case input_mouse_event_t::EET_CLICK: + updateMouseButtonState(map, mouseEvent.clickEvent); + break; + + case input_mouse_event_t::EET_SCROLL: + requestMagnitudeUpdateWithSignedComponents( + ZeroPivot, + hlsl::float32_t2( + static_cast(mouseEvent.scrollEvent.verticalScroll), + mouseEvent.scrollEvent.horizontalScroll), + SInputProcessorBindingGroups::MouseScroll, + map); + break; + + case input_mouse_event_t::EET_MOVEMENT: + requestMagnitudeUpdateWithSignedComponents( + ZeroPivot, + hlsl::float32_t2( + mouseEvent.movementEvent.relativeMovementX, + mouseEvent.movementEvent.relativeMovementY), + SInputProcessorBindingGroups::MouseRelativeMovement, + map); + break; + + default: + break; + } + } + }); +} + +void IGimbalInputProcessor::processImguizmo(gimbal_event_t* output, uint32_t& count, std::span events) +{ + processBindingMap( + m_imguizmoVirtualEventMap, + output, + count, + [&](auto& map) + { + for (const auto& ev : events) + { + const auto& deltaWorldTRS = ev; + + hlsl::SRigidTransformComponents world = {}; + if (!hlsl::CCameraMathUtilities::tryExtractRigidTransformComponents(deltaWorldTRS, world)) + continue; + + requestMagnitudeUpdateWithSignedComponents( + ZeroPivot, + world.translation, + SInputProcessorBindingGroups::ImguizmoTranslation, + map); + + const auto dRotationRad = hlsl::CCameraMathUtilities::getCameraOrientationEulerRadians(world.orientation); + requestMagnitudeUpdateWithSignedComponents( + ZeroPivot, + dRotationRad, + SInputProcessorBindingGroups::ImguizmoRotation, + map); + + requestMagnitudeUpdateWithSignedComponents( + UnitPivot, + world.scale, + SInputProcessorBindingGroups::ImguizmoScale, + map); + } + }); 
+} + +double IGimbalInputProcessor::clampFrameDeltaTimeSeconds( + const std::chrono::microseconds nextPresentationTimeStamp, + const std::chrono::microseconds lastVirtualUpTimeStamp) +{ + const auto deltaSeconds = std::chrono::duration( + nextPresentationTimeStamp - lastVirtualUpTimeStamp).count(); + if (deltaSeconds < 0.0) + return 0.0; + return std::min(deltaSeconds, MaxFrameDeltaSeconds); +} + +bool IGimbalInputProcessor::tryGetMouseButtonCode( + const ui::E_MOUSE_BUTTON button, + ui::E_MOUSE_CODE& outCode) +{ + switch (button) + { + case ui::EMB_LEFT_BUTTON: outCode = ui::EMC_LEFT_BUTTON; return true; + case ui::EMB_RIGHT_BUTTON: outCode = ui::EMC_RIGHT_BUTTON; return true; + case ui::EMB_MIDDLE_BUTTON: outCode = ui::EMC_MIDDLE_BUTTON; return true; + case ui::EMB_BUTTON_4: outCode = ui::EMC_BUTTON_4; return true; + case ui::EMB_BUTTON_5: outCode = ui::EMC_BUTTON_5; return true; + default: + return false; + } +} + +} // namespace nbl::ui diff --git a/src/nbl/ext/Cameras/ILinearProjection.cpp b/src/nbl/ext/Cameras/ILinearProjection.cpp new file mode 100644 index 0000000000..8657da95e8 --- /dev/null +++ b/src/nbl/ext/Cameras/ILinearProjection.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". 
+// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/ILinearProjection.hpp" + +namespace nbl::core +{ + +ILinearProjection::CProjection::CProjection() : CProjection(projection_matrix_t(1)) +{ +} + +ILinearProjection::CProjection::CProjection(const projection_matrix_t& matrix) +{ + setProjectionMatrix(matrix); +} + +void ILinearProjection::CProjection::setProjectionMatrix(const projection_matrix_t& matrix) +{ + m_projectionMatrix = matrix; + const auto det = hlsl::determinant(m_projectionMatrix); + + m_isProjectionSingular = !det; + + if (m_isProjectionSingular) + { + m_isProjectionLeftHanded = std::nullopt; + m_invProjectionMatrix = std::nullopt; + } + else + { + m_isProjectionLeftHanded = det < 0.0; + m_invProjectionMatrix = hlsl::inverse(m_projectionMatrix); + } +} + +bool ILinearProjection::setCamera(core::smart_refctd_ptr&& camera) +{ + if (!camera) + return false; + + m_camera = std::move(camera); + return true; +} + +ICamera* ILinearProjection::getCamera() +{ + return m_camera.get(); +} + +ILinearProjection::concatenated_matrix_t ILinearProjection::getMV(const model_matrix_t& model) const +{ + const auto& view = m_camera->getGimbal().getViewMatrix(); + return hlsl::mul( + hlsl::CCameraMathUtilities::promoteAffine3x4To4x4(view), + hlsl::CCameraMathUtilities::promoteAffine3x4To4x4(model)); +} + +ILinearProjection::concatenated_matrix_t ILinearProjection::getMVP(const CProjection& projection, const model_matrix_t& model) const +{ + return getMVP(projection, getMV(model)); +} + +ILinearProjection::concatenated_matrix_t ILinearProjection::getMVP(const CProjection& projection, const concatenated_matrix_t& mv) const +{ + return hlsl::mul(projection.getProjectionMatrix(), mv); +} + +ILinearProjection::inv_concatenated_matrix_t ILinearProjection::getMVInverse(const model_matrix_t& model) const +{ + const auto mv = getMV(model); + if (const auto det = hlsl::determinant(mv); det) + return hlsl::inverse(mv); + return 
std::nullopt; +} + +ILinearProjection::inv_concatenated_matrix_t ILinearProjection::getMVPInverse(const CProjection& projection, const model_matrix_t& model) const +{ + const auto mvp = getMVP(projection, model); + if (const auto det = hlsl::determinant(mvp); det) + return hlsl::inverse(mvp); + return std::nullopt; +} + +} // namespace nbl::core diff --git a/src/nbl/ext/Cameras/IPlanarProjection.cpp b/src/nbl/ext/Cameras/IPlanarProjection.cpp new file mode 100644 index 0000000000..47f40df79d --- /dev/null +++ b/src/nbl/ext/Cameras/IPlanarProjection.cpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#include "nbl/ext/Cameras/IPlanarProjection.hpp" + +namespace nbl::core +{ + +void IPlanarProjection::CProjection::update(const bool leftHanded, const float aspectRatio) +{ + switch (m_parameters.m_type) + { + case Perspective: + { + const auto& fov = m_parameters.m_planar.perspective.fov; + + if (leftHanded) + base_t::setProjectionMatrix(hlsl::buildProjectionMatrixPerspectiveFovLH(hlsl::radians(fov), aspectRatio, m_parameters.m_zNear, m_parameters.m_zFar)); + else + base_t::setProjectionMatrix(hlsl::buildProjectionMatrixPerspectiveFovRH(hlsl::radians(fov), aspectRatio, m_parameters.m_zNear, m_parameters.m_zFar)); + } break; + + case Orthographic: + { + const auto& orthoW = m_parameters.m_planar.orthographic.orthoWidth; + const auto viewHeight = orthoW / aspectRatio; + + if (leftHanded) + base_t::setProjectionMatrix(hlsl::buildProjectionMatrixOrthoLH(orthoW, viewHeight, m_parameters.m_zNear, m_parameters.m_zFar)); + else + base_t::setProjectionMatrix(hlsl::buildProjectionMatrixOrthoRH(orthoW, viewHeight, m_parameters.m_zNear, m_parameters.m_zFar)); + } break; + } +} + +void IPlanarProjection::CProjection::setPerspective(const float zNear, const float zFar, const float fov) +{ + m_parameters.m_type = Perspective; 
+ m_parameters.m_planar.perspective.fov = fov; + m_parameters.m_zNear = zNear; + m_parameters.m_zFar = zFar; +} + +void IPlanarProjection::CProjection::setOrthographic(const float zNear, const float zFar, const float orthoWidth) +{ + m_parameters.m_type = Orthographic; + m_parameters.m_planar.orthographic.orthoWidth = orthoWidth; + m_parameters.m_zNear = zNear; + m_parameters.m_zFar = zFar; +} + +} // namespace nbl::core diff --git a/src/nbl/ext/ImGui/CMakeLists.txt b/src/nbl/ext/ImGui/CMakeLists.txt index e46d93b952..8fbf403602 100644 --- a/src/nbl/ext/ImGui/CMakeLists.txt +++ b/src/nbl/ext/ImGui/CMakeLists.txt @@ -21,7 +21,9 @@ nbl_create_ext_library_project( "" ) -target_link_libraries(${LIB_NAME} PUBLIC imtestengine) +target_link_libraries(${LIB_NAME} PUBLIC imtestengine imguizmo) + +add_library(Nabla::ext::ImGUI ALIAS ${LIB_NAME}) # this should be standard for all extensions set(_ARCHIVE_ENTRY_KEY_ "ImGui/builtin/hlsl") # then each one has unique archive key @@ -44,4 +46,4 @@ if(NBL_EMBED_BUILTIN_RESOURCES) ADD_CUSTOM_BUILTIN_RESOURCES(${_BR_TARGET_} RESOURCES_TO_EMBED "${_ARCHIVE_ABSOLUTE_ENTRY_PATH_}" "${_ARCHIVE_ENTRY_KEY_}" "nbl::ext::imgui::builtin" "${_OUTPUT_DIRECTORY_HEADER_}" "${_OUTPUT_DIRECTORY_SOURCE_}") LINK_BUILTIN_RESOURCES_TO_TARGET(${LIB_NAME} ${_BR_TARGET_}) -endif() \ No newline at end of file +endif()