From e4a5b11b9776a62ebc0a27da9efb444822400f1e Mon Sep 17 00:00:00 2001 From: Tobias Koppers Date: Tue, 28 Apr 2026 21:45:43 +0200 Subject: [PATCH 01/15] [turbo-tasks-backend] Single-item optimization for lost follower operations (#93200) ## What? Adds performance optimizations for the common case of single-item lost follower operations in the turbo-tasks-backend aggregation update system, along with improved tracing capabilities for debugging. ## Why? When profiling turbopack operations, we observed that many aggregation update jobs involve only a single upper losing a single follower. The previous implementation always used vectors and iteration even for these single-item cases, adding unnecessary allocation overhead. Additionally, the existing `trace_aggregation_update` feature was incomplete (compilation errors) and lacked visibility into task data and operation statistics. ## How? ### Single-item optimization - Added a dedicated `InnerOfUpperLostFollower` job variant and `inner_of_upper_lost_follower` function to handle the single upper + single follower case without vector allocation - Optimized the plural variants (`InnerOfUppersLostFollowers`, `InnerOfUppersLostFollower`, `InnerOfUpperLostFollowers`) to delegate to the singular function when they contain only one item ### Tracing improvements - Added `trace_aggregation_update_stats` feature flag that enables counters for all aggregation update job types (new_followers, lost_followers, balance_edge, optimize_task, etc.) 
recorded in trace spans - Fixed `trace_aggregation_update` feature compilation errors by using `task.get_task_description()` instead of `ctx.get_task_description(task_id)` - Gated the `TaskDataCategory` used in aggregation update `ctx.task(...)` calls behind a const that resolves to `Meta` by default and `All` when tracing is enabled, so traces can see full task data without affecting default performance --------- Co-authored-by: Claude Opus 4.5 --- .../crates/turbo-tasks-backend/Cargo.toml | 1 + .../turbo-tasks-backend/src/backend/mod.rs | 15 +- .../backend/operation/aggregation_update.rs | 452 +++++++++++++++++- .../backend/operation/cleanup_old_edges.rs | 45 +- .../src/backend/operation/connect_children.rs | 14 +- 5 files changed, 486 insertions(+), 41 deletions(-) diff --git a/turbopack/crates/turbo-tasks-backend/Cargo.toml b/turbopack/crates/turbo-tasks-backend/Cargo.toml index b588ac6711cf..42ed56f33044 100644 --- a/turbopack/crates/turbo-tasks-backend/Cargo.toml +++ b/turbopack/crates/turbo-tasks-backend/Cargo.toml @@ -22,6 +22,7 @@ verify_serialization = [] verify_aggregation_graph = [] verify_immutable = [] verify_determinism = ["turbo-tasks/verify_determinism"] +trace_aggregation_update_stats = [] trace_aggregation_update_queue = [] trace_aggregation_update = ["trace_aggregation_update_queue"] trace_leaf_distance_update = [] diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs index 217f857115fd..b0f09a0cf00e 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs @@ -2350,11 +2350,22 @@ impl TurboTasksBackendInner { span.record("immutable", is_immutable || is_now_immutable); if !queue.is_empty() || !old_edges.is_empty() { - #[cfg(feature = "trace_task_completion")] - let _span = tracing::trace_span!("remove old edges and prepare new children").entered(); + #[cfg(any( + feature = "trace_task_completion", + 
feature = "trace_aggregation_update_stats" + ))] + let _span = + tracing::trace_span!("remove old edges and prepare new children", stats = Empty) + .entered(); // Remove outdated edges first, before removing in_progress+dirty flag. // We need to make sure all outdated edges are removed before the task can potentially // be scheduled and executed again + #[cfg(feature = "trace_aggregation_update_stats")] + { + let stats = CleanupOldEdgesOperation::run(task_id, old_edges, queue, ctx); + _span.record("stats", tracing::field::debug(stats)); + } + #[cfg(not(feature = "trace_aggregation_update_stats"))] CleanupOldEdgesOperation::run(task_id, old_edges, queue, ctx); } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs index 76795e0e25fe..83aa7e6a8d96 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs @@ -45,6 +45,10 @@ const MAX_COUNT_BEFORE_YIELD: usize = 1000; /// of them per `process()` call before yielding. const FIND_AND_SCHEDULE_BATCH_SIZE: usize = 10000; const MAX_UPPERS_FOLLOWER_PRODUCT: usize = 31; +#[cfg(not(feature = "trace_aggregation_update"))] +const AGGREGATION_UPDATE_CATEGORY: TaskDataCategory = TaskDataCategory::Meta; +#[cfg(feature = "trace_aggregation_update")] +const AGGREGATION_UPDATE_CATEGORY: TaskDataCategory = TaskDataCategory::All; type TaskIdVec = SmallVec<[TaskId; 4]>; type TaskIdWithCountVec = SmallVec<[(TaskId, u32); 2]>; @@ -240,6 +244,12 @@ pub enum AggregationUpdateJob { }, /// Notifies multiple upper tasks that one of its inner tasks has new followers. InnerOfUppersHasNewFollowers(Box), + /// Notifies an upper task that one of its inner tasks has lost a follower. 
+ InnerOfUpperLostFollower { + upper_id: TaskId, + lost_follower_id: TaskId, + retry: u16, + }, /// Notifies multiple upper tasks that one of its inner tasks has lost a follower. InnerOfUppersLostFollower { upper_ids: TaskIdVec, @@ -788,6 +798,30 @@ impl PartialEq for FindAndScheduleJob { impl Eq for FindAndScheduleJob {} +#[cfg(feature = "trace_aggregation_update_stats")] +#[derive(Default, Encode, Decode, Clone, Debug)] +pub struct AggregationUpdateQueueStats { + new_followers: usize, + inner_of_upper_has_new_follower: usize, + inner_of_uppers_has_new_follower: usize, + inner_of_upper_has_new_followers: usize, + lost_followers: usize, + inner_of_upper_lost_follower: usize, + inner_of_upper_lost_followers: usize, + inner_of_uppers_lost_follower: usize, + increase_active_count: usize, + decrease_active_count: usize, + balance_edge: usize, + balance_edge_batches: usize, + update_aggregation_number: usize, + update_aggregation_number_batches: usize, + optimize_task: usize, + aggregated_data_update: usize, + find_and_schedule_dirty_batches: usize, + find_and_schedule_dirty: usize, + schedule_task: usize, +} + /// Encodes the jobs in the queue. This is used to filter out transient jobs during encoding. 
mod encode_jobs { use bincode::{ @@ -858,6 +892,8 @@ pub struct AggregationUpdateQueue { optimize_queue: FxRingSet, #[bincode(skip, default = "FxHashMap::default")] scheduled_tasks: FxHashMap, + #[cfg(feature = "trace_aggregation_update_stats")] + pub stats: AggregationUpdateQueueStats, } impl AggregationUpdateQueue { @@ -871,6 +907,8 @@ impl AggregationUpdateQueue { balance_queue: FxRingSet::default(), optimize_queue: FxRingSet::default(), scheduled_tasks: FxHashMap::default(), + #[cfg(feature = "trace_aggregation_update_stats")] + stats: AggregationUpdateQueueStats::default(), } } @@ -884,6 +922,8 @@ impl AggregationUpdateQueue { optimize_queue, done_aggregation_number_updates: _, scheduled_tasks, + #[cfg(feature = "trace_aggregation_update_stats")] + stats: _, } = self; jobs.is_empty() && aggregation_number_updates.is_empty() @@ -1003,6 +1043,11 @@ impl AggregationUpdateQueue { let uppers = upper_ids.len(); let followers = new_follower_ids.len(); if uppers == 1 && followers == 1 { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += 1; + self.stats.inner_of_upper_has_new_follower += 1; + } self.inner_of_upper_has_new_follower( ctx, new_follower_ids[0], @@ -1020,6 +1065,11 @@ impl AggregationUpdateQueue { } else { take(upper_ids) }; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += uppers; + self.stats.inner_of_uppers_has_new_follower += 1; + } self.inner_of_uppers_has_new_follower(ctx, new_follower_id, upper_ids); } } else if let Some(upper_id) = upper_ids.pop() { @@ -1032,6 +1082,11 @@ impl AggregationUpdateQueue { } else { take(new_follower_ids) }; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += followers; + self.stats.inner_of_upper_has_new_followers += 1; + } self.inner_of_upper_has_new_followers(ctx, new_follower_ids, upper_id); } } @@ -1040,8 +1095,18 @@ impl AggregationUpdateQueue { new_follower_id, } => { if upper_ids.len() == 1 { + #[cfg(feature 
= "trace_aggregation_update_stats")] + { + self.stats.new_followers += upper_ids.len(); + self.stats.inner_of_upper_has_new_follower += 1; + } self.inner_of_upper_has_new_follower(ctx, new_follower_id, upper_ids[0], 1); } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += upper_ids.len(); + self.stats.inner_of_uppers_has_new_follower += 1; + } self.inner_of_uppers_has_new_follower(ctx, new_follower_id, upper_ids); } } @@ -1051,8 +1116,18 @@ impl AggregationUpdateQueue { } => { if upper_ids.len() == 1 { let (id, count) = upper_ids[0]; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += upper_ids.len(); + self.stats.inner_of_upper_has_new_follower += 1; + } self.inner_of_upper_has_new_follower(ctx, new_follower_id, id, count); } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += upper_ids.len(); + self.stats.inner_of_uppers_has_new_follower += 1; + } self.inner_of_uppers_has_new_follower(ctx, new_follower_id, upper_ids); } } @@ -1061,8 +1136,18 @@ impl AggregationUpdateQueue { new_follower_ids, } => { if new_follower_ids.len() == 1 { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += new_follower_ids.len(); + self.stats.inner_of_upper_has_new_follower += 1; + } self.inner_of_upper_has_new_follower(ctx, new_follower_ids[0], upper_id, 1); } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += new_follower_ids.len(); + self.stats.inner_of_upper_has_new_followers += 1; + } self.inner_of_upper_has_new_followers(ctx, new_follower_ids, upper_id); } } @@ -1072,8 +1157,18 @@ impl AggregationUpdateQueue { } => { if new_follower_ids.len() == 1 { let (id, count) = new_follower_ids[0]; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += new_follower_ids.len(); + self.stats.inner_of_upper_has_new_follower += 1; + } 
self.inner_of_upper_has_new_follower(ctx, id, upper_id, count); } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += new_follower_ids.len(); + self.stats.inner_of_upper_has_new_followers += 1; + } self.inner_of_upper_has_new_followers(ctx, new_follower_ids, upper_id); } } @@ -1081,14 +1176,45 @@ impl AggregationUpdateQueue { upper_id, new_follower_id, } => { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.new_followers += 1; + self.stats.inner_of_upper_has_new_follower += 1; + } self.inner_of_upper_has_new_follower(ctx, new_follower_id, upper_id, 1); } + AggregationUpdateJob::InnerOfUpperLostFollower { + lost_follower_id, + upper_id, + retry, + } => { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += 1; + self.stats.inner_of_upper_lost_follower += 1; + } + self.inner_of_upper_lost_follower(ctx, lost_follower_id, upper_id, retry); + } AggregationUpdateJob::InnerOfUppersLostFollowers(mut boxed) => { let InnerOfUppersLostFollowersJob { upper_ids, lost_follower_ids, } = &mut *boxed; - if upper_ids.len() > lost_follower_ids.len() { + let uppers = upper_ids.len(); + let followers = lost_follower_ids.len(); + if uppers == 1 && followers == 1 { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += 1; + self.stats.inner_of_upper_lost_follower += 1; + } + self.inner_of_upper_lost_follower( + ctx, + lost_follower_ids[0], + upper_ids[0], + 0, + ); + } else if uppers > followers { if let Some(lost_follower_id) = lost_follower_ids.pop() { let upper_ids = if !lost_follower_ids.is_empty() { let upper_ids = upper_ids.clone(); @@ -1099,6 +1225,11 @@ impl AggregationUpdateQueue { } else { take(upper_ids) }; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += uppers; + self.stats.inner_of_upper_lost_follower += 1; + } self.inner_of_uppers_lost_follower(ctx, lost_follower_id, upper_ids, 0); } } else if let 
Some(upper_id) = upper_ids.pop() { @@ -1111,6 +1242,11 @@ impl AggregationUpdateQueue { } else { take(lost_follower_ids) }; + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += followers; + self.stats.inner_of_upper_lost_followers += 1; + } self.inner_of_upper_lost_followers(ctx, lost_follower_ids, upper_id, 0); } } @@ -1119,19 +1255,61 @@ impl AggregationUpdateQueue { lost_follower_id, retry, } => { - self.inner_of_uppers_lost_follower(ctx, lost_follower_id, upper_ids, retry); + if upper_ids.len() == 1 { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += upper_ids.len(); + self.stats.inner_of_upper_lost_follower += 1; + } + self.inner_of_upper_lost_follower( + ctx, + lost_follower_id, + upper_ids[0], + retry, + ); + } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += upper_ids.len(); + self.stats.inner_of_uppers_lost_follower += 1; + } + self.inner_of_uppers_lost_follower(ctx, lost_follower_id, upper_ids, retry); + } } AggregationUpdateJob::InnerOfUpperLostFollowers { upper_id, lost_follower_ids, retry, } => { - self.inner_of_upper_lost_followers(ctx, lost_follower_ids, upper_id, retry); + if lost_follower_ids.len() == 1 { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += lost_follower_ids.len(); + self.stats.inner_of_upper_lost_follower += 1; + } + self.inner_of_upper_lost_follower( + ctx, + lost_follower_ids[0], + upper_id, + retry, + ); + } else { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.lost_followers += lost_follower_ids.len(); + self.stats.inner_of_upper_lost_followers += 1; + } + self.inner_of_upper_lost_followers(ctx, lost_follower_ids, upper_id, retry); + } } AggregationUpdateJob::AggregatedDataUpdate(box AggregatedDataUpdateJob { upper_ids, update, }) => { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.aggregated_data_update += 1; + } 
self.aggregated_data_update(upper_ids, ctx, update); } AggregationUpdateJob::InvalidateDueToCollectiblesChange { @@ -1178,6 +1356,10 @@ impl AggregationUpdateQueue { } false } else if !self.aggregation_number_updates.is_empty() { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.update_aggregation_number_batches += 1; + } let mut remaining = MAX_COUNT_BEFORE_YIELD; while remaining > 0 { if let Some(( @@ -1201,6 +1383,10 @@ impl AggregationUpdateQueue { span: None, }, ); + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.update_aggregation_number += 1; + } self.update_aggregation_number(ctx, task_id, distance, base_aggregation_number); remaining -= 1; } else { @@ -1209,6 +1395,10 @@ impl AggregationUpdateQueue { } false } else if !self.balance_queue.is_empty() { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.balance_edge_batches += 1; + } let mut remaining = MAX_COUNT_BEFORE_YIELD; while remaining > 0 { if let Some(BalanceJob { @@ -1220,6 +1410,10 @@ impl AggregationUpdateQueue { { #[cfg(feature = "trace_aggregation_update_queue")] let _guard = span.map(|s| s.entered()); + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.balance_edge += 1; + } self.balance_edge(ctx, upper, task); remaining -= 1; } else { @@ -1238,6 +1432,10 @@ impl AggregationUpdateQueue { // all have the same upper count. 
Optimizing the root first #[cfg(feature = "trace_aggregation_update_queue")] let _guard = span.map(|s| s.entered()); + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.optimize_task += 1; + } self.optimize_task(ctx, task_id); false } else if !self.find_and_schedule.is_empty() { @@ -1247,9 +1445,18 @@ impl AggregationUpdateQueue { .min(FIND_AND_SCHEDULE_BATCH_SIZE); let jobs: SmallVec<[FindAndScheduleJob; 4]> = self.find_and_schedule.drain(..count).collect(); + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.find_and_schedule_dirty_batches += 1; + self.stats.find_and_schedule_dirty += jobs.len(); + } self.find_and_schedule_dirty(jobs, ctx); false } else if !self.scheduled_tasks.is_empty() { + #[cfg(feature = "trace_aggregation_update_stats")] + { + self.stats.schedule_task += self.scheduled_tasks.len(); + } ctx.for_each_task_all( self.scheduled_tasks.keys().copied(), "schedule tasks", @@ -1278,7 +1485,7 @@ impl AggregationUpdateQueue { upper_id, task_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let upper_aggregation_number = get_aggregation_number(&upper); let task_aggregation_number = get_aggregation_number(&task); @@ -1527,6 +1734,176 @@ impl AggregationUpdateQueue { ); } + fn inner_of_upper_lost_follower( + &mut self, + ctx: &mut impl ExecuteContext<'_>, + lost_follower_id: TaskId, + upper_id: TaskId, + mut retry: u16, + ) { + #[cfg(feature = "trace_aggregation_update")] + let _span = trace_span!("lost follower").entered(); + + // see documentation of `retry_loop` for more information why this is needed + let result = retry_loop(retry, || { + // STEP 1 + let mut follower = ctx.task( + lost_follower_id, + // For performance reasons this should stay `Meta` and not `All` + AGGREGATION_UPDATE_CATEGORY, + ); + + // STEP 2 + let mut is_upper = true; + let mut removed_upper = false; + + // STEP 3 + follower.update_upper(upper_id, |old| { + let 
Some(old) = old else { + is_upper = false; + return None; + }; + if old == 1 { + removed_upper = true; + return None; + } + Some(old - 1) + }); + + // STEP 4 + if is_upper { + if removed_upper { + let data = AggregatedDataUpdate::from_task(&mut follower).invert(); + let followers = get_followers(&follower); + drop(follower); + + // STEP 5 + if !data.is_empty() { + // remove data from upper + let mut upper = ctx.task( + upper_id, + // For performance reasons this should stay `Meta` and not `All` + AGGREGATION_UPDATE_CATEGORY, + ); + // STEP 6 + let diff = data.apply(&mut upper, ctx.should_track_activeness(), self); + if !diff.is_empty() { + let upper_ids = get_uppers(&upper); + self.push( + AggregatedDataUpdateJob { + upper_ids, + update: diff, + } + .into(), + ) + } + } + // STEP 7 + if !followers.is_empty() { + self.push(AggregationUpdateJob::InnerOfUpperLostFollowers { + upper_id, + lost_follower_ids: followers, + retry: 0, + }); + } + } else { + drop(follower); + } + + // STEP 8 + return ControlFlow::Break(()); + } + + drop(follower); + + // STEP 9 + let mut upper = ctx.task( + upper_id, + // For performance reasons this should stay `Meta` and not `All` + AGGREGATION_UPDATE_CATEGORY, + ); + let mut is_follower = true; + let mut removed_follower = false; + + // STEP 10 + upper.update_followers(lost_follower_id, |old| { + let Some(old) = old else { + is_follower = false; + return None; + }; + if old == 1 { + removed_follower = true; + return None; + } + Some(old - 1) + }); + + // STEP 11 + if is_follower { + if removed_follower { + // STEP 12 + // May optimize the task + if upper.followers_len().is_power_of_two() { + self.push_optimize_task(upper_id); + } + + // STEP 13 + let has_active_count = ctx.should_track_activeness() + && upper.get_activeness().is_some_and(|a| a.active_counter > 0); + let upper_ids = get_uppers(&upper); + drop(upper); + + // STEP 14 + // update active count + if has_active_count { + self.push(AggregationUpdateJob::DecreaseActiveCount { + 
task: lost_follower_id, + }); + } + + // STEP 15 + // notify uppers about lost follower + if !upper_ids.is_empty() { + self.push(AggregationUpdateJob::InnerOfUppersLostFollower { + upper_ids, + lost_follower_id, + retry: 0, + }); + } + } else { + drop(upper); + } + + // STEP 16 + return ControlFlow::Break(()); + } + ControlFlow::Continue(()) + }); + + // STEP 17 + if result.is_err() { + retry += 1; + if retry > MAX_RETRIES { + let lost_follower_description = ctx + .task(lost_follower_id, TaskDataCategory::Data) + .get_task_description(); + let upper_description = ctx + .task(upper_id, TaskDataCategory::Data) + .get_task_description(); + panic!( + "inner_of_upper_lost_follower is not able to remove follower \ + {lost_follower_id} ({lost_follower_description}) from {upper_id} \ + {upper_description} as it doesn't exist as upper or follower edges", + ); + } + self.push(AggregationUpdateJob::InnerOfUpperLostFollower { + upper_id, + lost_follower_id, + retry, + }); + } + } + fn inner_of_uppers_lost_follower( &mut self, ctx: &mut impl ExecuteContext<'_>, @@ -1543,7 +1920,7 @@ impl AggregationUpdateQueue { let mut follower = ctx.task( lost_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); // STEP 2 @@ -1620,7 +1997,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let mut not_a_follower = false; let mut removed_follower = false; @@ -1733,7 +2110,7 @@ impl AggregationUpdateQueue { let mut follower = ctx.task( lost_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); // STEP 2 @@ -1765,7 +2142,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + 
AGGREGATION_UPDATE_CATEGORY, ); // STEP 6 let diff = data.apply(&mut upper, ctx.should_track_activeness(), self); @@ -1805,7 +2182,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); swap_retain(&mut lost_follower_ids, |&mut lost_follower_id| { let mut not_a_follower = false; @@ -1924,7 +2301,7 @@ impl AggregationUpdateQueue { let follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); get_aggregation_number(&follower) }; @@ -1940,7 +2317,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); // decide if it should be an inner or follower let upper_aggregation_number = get_aggregation_number(&upper); @@ -2011,7 +2388,7 @@ impl AggregationUpdateQueue { let mut new_follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let follower_aggregation_number = get_aggregation_number(&new_follower); @@ -2172,7 +2549,7 @@ impl AggregationUpdateQueue { let follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); (new_follower_id, count, get_aggregation_number(&follower)) }) @@ -2192,7 +2569,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); // decide if it should be an inner or follower @@ -2289,7 +2666,7 @@ impl AggregationUpdateQueue { let mut new_follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and 
not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let follower_aggregation_number = get_aggregation_number(&new_follower); @@ -2347,7 +2724,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let diffs = upper_data_updates .into_iter() @@ -2388,7 +2765,7 @@ impl AggregationUpdateQueue { let upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); is_active = upper.has_activeness(); } @@ -2417,7 +2794,7 @@ impl AggregationUpdateQueue { let follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); get_aggregation_number(&follower) }; @@ -2434,7 +2811,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); // decide if it should be an inner or follower let upper_aggregation_number = get_aggregation_number(&upper); @@ -2502,7 +2879,7 @@ impl AggregationUpdateQueue { let mut new_follower = ctx.task( new_follower_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let follower_aggregation_number = get_aggregation_number(&new_follower); @@ -2532,7 +2909,7 @@ impl AggregationUpdateQueue { let mut upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let diff = data.apply(&mut upper, ctx.should_track_activeness(), self); if !diff.is_empty() { @@ -2560,7 +2937,7 @@ impl AggregationUpdateQueue { let upper = ctx.task( upper_id, // For performance reasons this should stay `Meta` and not `All` - 
TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); is_active = upper.has_activeness(); } @@ -2586,7 +2963,7 @@ impl AggregationUpdateQueue { let mut task = ctx.task( task_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let state = task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id)); let is_new = state.is_empty(); @@ -2631,7 +3008,7 @@ impl AggregationUpdateQueue { task_id, // For performance reasons this should stay Meta and not All. // persistent_task_type is now set eagerly in initialize_new_task. - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let state = task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id)); let is_new = state.is_empty(); @@ -2678,7 +3055,7 @@ impl AggregationUpdateQueue { let mut task = ctx.task( task_id, // For performance reasons this should stay `Meta` and not `All` - TaskDataCategory::Meta, + AGGREGATION_UPDATE_CATEGORY, ); let current = task.get_aggregation_number().copied().unwrap_or_default(); let old = current.effective; @@ -2710,7 +3087,7 @@ impl AggregationUpdateQueue { #[cfg(feature = "trace_aggregation_update")] let _span = trace_span!( "update aggregation number", - task = ctx.get_task_description(task_id), + task = task.get_task_description(), old, aggregation_number ) @@ -2883,6 +3260,29 @@ impl AggregationUpdateQueue { self.push_optimize_task(task_id); } } + + #[cfg(feature = "trace_aggregation_update_stats")] + pub fn execute_with_stats( + mut self, + ctx: &mut impl ExecuteContext<'_>, + ) -> AggregationUpdateQueueStats { + loop { + ctx.operation_suspend_point(&self); + if self.process(ctx) { + return self.stats; + } + } + } + + #[cfg(not(feature = "trace_aggregation_update_stats"))] + pub fn execute_with_stats(mut self, ctx: &mut impl ExecuteContext<'_>) { + loop { + ctx.operation_suspend_point(&self); + if self.process(ctx) { + return; + } + } + } } impl Operation for 
AggregationUpdateQueue { diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs index 4acffba837e3..aa544b632789 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs @@ -20,7 +20,7 @@ use crate::{ data::{CellRef, CollectibleRef, CollectiblesRef}, }; -#[derive(Encode, Decode, Clone, Default)] +#[derive(Encode, Decode, Clone)] pub enum CleanupOldEdgesOperation { RemoveEdges { task_id: TaskId, @@ -30,11 +30,20 @@ pub enum CleanupOldEdgesOperation { AggregationUpdate { queue: AggregationUpdateQueue, }, - #[default] - Done, + Done { + stats: Stats, + }, // TODO Add aggregated edge } +impl Default for CleanupOldEdgesOperation { + fn default() -> Self { + Self::Done { + stats: Default::default(), + } + } +} + #[derive(Encode, Decode, Clone)] pub enum OutdatedEdge { Child(TaskId), @@ -44,24 +53,27 @@ pub enum OutdatedEdge { CollectiblesDependency(CollectiblesRef), } +#[cfg(feature = "trace_aggregation_update_stats")] +type Stats = super::aggregation_update::AggregationUpdateQueueStats; +#[cfg(not(feature = "trace_aggregation_update_stats"))] +type Stats = (); + impl CleanupOldEdgesOperation { pub fn run( task_id: TaskId, outdated: Vec, queue: AggregationUpdateQueue, ctx: &mut impl ExecuteContext<'_>, - ) { + ) -> Stats { CleanupOldEdgesOperation::RemoveEdges { task_id, outdated, queue, } - .execute(ctx); + .execute_with_stats(ctx) } -} -impl Operation for CleanupOldEdgesOperation { - fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { + fn execute_with_stats(mut self, ctx: &mut impl ExecuteContext<'_>) -> Stats { loop { ctx.operation_suspend_point(&self); match self { @@ -222,13 +234,24 @@ impl Operation for CleanupOldEdgesOperation { } CleanupOldEdgesOperation::AggregationUpdate { ref mut queue } => { if queue.process(ctx) { 
- self = CleanupOldEdgesOperation::Done; + self = CleanupOldEdgesOperation::Done { + #[cfg(feature = "trace_aggregation_update_stats")] + stats: take(&mut queue.stats), + #[cfg(not(feature = "trace_aggregation_update_stats"))] + stats: (), + }; } } - CleanupOldEdgesOperation::Done => { - return; + CleanupOldEdgesOperation::Done { stats } => { + return stats; } } } } } + +impl Operation for CleanupOldEdgesOperation { + fn execute(self, ctx: &mut impl ExecuteContext<'_>) { + self.execute_with_stats(ctx); + } +} diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs index c4db6a67d360..d5335b32560f 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs @@ -109,8 +109,18 @@ pub fn connect_children( } { - #[cfg(feature = "trace_task_completion")] - let _span = tracing::trace_span!("connect new children").entered(); + #[cfg(any( + feature = "trace_task_completion", + feature = "trace_aggregation_update_stats" + ))] + let _span = tracing::trace_span!("connect new children", stats = tracing::field::Empty) + .entered(); + #[cfg(feature = "trace_aggregation_update_stats")] + { + let stats = queue.execute_with_stats(ctx); + _span.record("stats", tracing::field::debug(stats)); + } + #[cfg(not(feature = "trace_aggregation_update_stats"))] queue.execute(ctx); } } From dab281a3abd0f18efa47a0d47d18d0aa4a6d5de3 Mon Sep 17 00:00:00 2001 From: Tobias Koppers Date: Tue, 28 Apr 2026 21:48:32 +0200 Subject: [PATCH 02/15] Fix `export * as X from './self'` in scope-hoisted modules (#93192) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What? Scope-hoisted execution of a module that re-exports its own namespace (`export * as X from './self'`) returned the wrong namespace for named imports of the module. 
Given: ```js // data.js import * as Self from './data' export function foo() { return 'foo' } export function bar() { return 'bar' } export function fooViaSelf() { return Self.foo() } // Self.foo undefined export * as Data from './data' ``` Any binding imported from `./data` that relied on the module's own namespace (e.g. `Self.foo`, `Data.foo`, `Data.Data.foo`) was `undefined` at runtime. Without scope hoisting the same code worked correctly. This PR fixes the bug and adds execution tests covering self-namespace re-exports — with and without scope hoisting — including chained re-exports (`Data.Data.foo`, `Data.Data.Data.bar`). ### Why? For an access like `Self.Data` where `Self = import * as Self from './data'` and `Data` is exposed through `export * as Data from './data'`, the namespace-member-access optimization rewrites the reference to a named import resolving to a synthesized rename module (`./data `). `ReferencedAsset::get_ident_inner` then recurses through the rename's `EsmExport::ImportedNamespace("Data")` and returns a `namespace_ident` derived from the inner module's chunk-item id — but `EsmAssetReference::code_generation` independently took `id = referenced_asset.chunk_item_id` and emitted ```js var <namespace_ident> = __turbopack_context__.i("<chunk_item_id of the rename module>"); ``` so the variable named like `ns(data.js)` actually held `ns(rename) = { Data: ns(data.js) }`. The non-optimized `import * as Self` uses the same mangled name and sees the rename's namespace, so `Self.foo()` evaluates to `undefined`. ### How? Keep the variable name and the `.i(...)` argument consistent by moving the "what to import" decision onto the ident itself: - `ReferencedAssetIdent::Module` gains an `import_source: ImportSource` field that describes what to import to populate the namespace variable. - `ImportSource` is an enum: - `Module { asset }` — carries a reference to the final module in any re-export chain, from which the chunk-item id is lazily computed.
- `External { request, ty }` — carries everything needed to emit `__turbopack_external_import` / `__turbopack_external_require`. - The `namespace_ident` is cached in `ReferencedAssetIdent::Module` at resolution time (computed via `ImportSource::get_namespace_ident()`) so downstream sync visitors can read it without re-entering the async layer. - `ReferencedAsset::get_ident` / `get_ident_inner` populate the field. For in-group re-exports the inner module propagates up; for external references the `External` variant is used. - `EsmAssetReference::code_generation` destructures `ReferencedAssetIdent::Module { namespace_ident, ctxt, import_source, .. }` and dispatches purely on `import_source`; it no longer reads `referenced_asset` after the `get_ident` call. The hoisted-statement dedup key still uses the directly-referenced asset's id, so two references that happen to resolve to the same inner module via different paths (e.g. direct vs. through a rename) still emit separate `var` declarations for AST merging to rename. - ESM-external gating (`__turbopack_external_import` vs. `__turbopack_external_require`) stays where it was — the emit site reads `self.import_externals` from the surrounding `EsmAssetReference`, so `ImportSource::External` does not carry it. No additional `MergeableModuleExposure` or `additional_ids` changes are needed. The rename module is never referenced at runtime; no snapshot files change. ### Tests - `turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/` — scope-hoisted execution test covering self-namespace re-exports, nested access (`Data.Data.foo`, `Data.Data.Data.bar`), chained re-exports through another module, and namespace key enumeration. - `turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/` — same test cases, run with scope hoisting disabled, reusing the fixtures from the sibling directory. 
Verified by `cargo test --test execution` (213 passed) and `cargo test --test snapshot` (89 passed) in `turbopack-tests`. No snapshot files are modified. Closes NEXT- Fixes # --------- Co-authored-by: Tobias Koppers Co-authored-by: Claude Co-authored-by: Niklas Mischkulnig <4586894+mischnic@users.noreply.github.com> --- .../src/references/esm/base.rs | 209 ++++++++++-------- .../src/references/esm/export.rs | 2 +- .../input/index.js | 1 + .../options.json | 3 + .../exports/self-reexport-star/input/data.js | 16 ++ .../exports/self-reexport-star/input/index.js | 43 +++- .../self-reexport-star/input/reexport.js | 2 + 7 files changed, 181 insertions(+), 95 deletions(-) create mode 100644 turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/input/index.js create mode 100644 turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/options.json create mode 100644 turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/reexport.js diff --git a/turbopack/crates/turbopack-ecmascript/src/references/esm/base.rs b/turbopack/crates/turbopack-ecmascript/src/references/esm/base.rs index 76f38b7e78a5..8782de9af4c7 100644 --- a/turbopack/crates/turbopack-ecmascript/src/references/esm/base.rs +++ b/turbopack/crates/turbopack-ecmascript/src/references/esm/base.rs @@ -69,12 +69,52 @@ pub enum ReferencedAssetIdent { }, /// The given export (or namespace) should be imported and will be assigned to a new variable. Module { + /// The name of the variable that will hold the imported namespace. Cached from + /// `import_source.get_namespace_ident(..)` at resolution time so downstream sync + /// visitors can read it without re-entering the async layer. namespace_ident: String, ctxt: Option, export: Option, + /// Describes what to import to populate the variable that `namespace_ident` names. + /// + /// When the ident was resolved through a re-export chain (e.g. 
`export * as X from + /// './inner'`), this is the final module in that chain, not the directly referenced + /// asset — so the `.i(...)` call that initializes the variable loads the module whose + /// namespace `namespace_ident` claims to hold. + import_source: ImportSource, }, } +/// The source to import when initializing a `ReferencedAssetIdent::Module` variable. +#[derive(Debug)] +pub enum ImportSource { + /// Import an in-graph module. + Module { + asset: ResolvedVc>, + }, + /// Import an external dependency. The emitting site decides between + /// `__turbopack_external_import` and `__turbopack_external_require` based on its own + /// `import_externals` flag. + External { request: RcStr, ty: ExternalType }, +} + +impl ImportSource { + /// Compute the name of the variable that should hold the imported namespace. + pub async fn get_namespace_ident( + &self, + chunking_context: Vc>, + ) -> Result { + Ok(match self { + ImportSource::Module { asset } => { + ReferencedAsset::get_ident_from_placeable(asset, chunking_context).await? 
+ } + ImportSource::External { request, ty } => { + magic_identifier::mangle(&format!("{ty} external {request}")) + } + }) + } +} + impl ReferencedAssetIdent { pub fn into_module_namespace_ident(self) -> Option<(String, Option)> { match self { @@ -98,6 +138,7 @@ impl ReferencedAssetIdent { namespace_ident, ctxt, export, + import_source: _, } => { if let Some(export) = export { Either::Right(MemberExpr { @@ -233,10 +274,12 @@ impl ReferencedAsset { // but in the module containing the reexport ctxt: None, export, + import_source, }) => Some(ReferencedAssetIdent::Module { namespace_ident, ctxt: Some(ctxt), export, + import_source, }), ident => ident, }, @@ -249,18 +292,26 @@ impl ReferencedAsset { } } + let import_source = ImportSource::Module { asset: *asset }; Some(ReferencedAssetIdent::Module { - namespace_ident: Self::get_ident_from_placeable(asset, chunking_context) - .await?, + namespace_ident: import_source.get_namespace_ident(chunking_context).await?, ctxt: None, export, + import_source, + }) + } + ReferencedAsset::External(request, ty) => { + let import_source = ImportSource::External { + request: request.clone(), + ty: *ty, + }; + Some(ReferencedAssetIdent::Module { + namespace_ident: import_source.get_namespace_ident(chunking_context).await?, + ctxt: None, + export, + import_source, }) } - ReferencedAsset::External(request, ty) => Some(ReferencedAssetIdent::Module { - namespace_ident: magic_identifier::mangle(&format!("{ty} external {request}")), - ctxt: None, - export, - }), ReferencedAsset::None | ReferencedAsset::Unresolvable => None, }) } @@ -617,11 +668,22 @@ impl EsmAssetReference { scope_hoisting_context, ) .await?; + // `referenced_asset` must not be used past this point: the ident carries + // everything about the import target (see `ImportSource`) — notably, + // when the ident was resolved through a re-export chain, the + // directly-referenced asset is the outer (rename) module, not the one + // the emitted variable actually holds. 
+ drop(referenced_asset); match ident { Some(ReferencedAssetIdent::LocalBinding { .. }) => { // no need to import } - Some(ident @ ReferencedAssetIdent::Module { .. }) => { + Some(ReferencedAssetIdent::Module { + namespace_ident, + ctxt, + export: _, + import_source, + }) => { let span = this .issue_source .to_swc_offsets() @@ -629,43 +691,31 @@ impl EsmAssetReference { .map_or(DUMMY_SP, |(start, end)| { Span::new(BytePos(start), BytePos(end)) }); - match &*referenced_asset { - ReferencedAsset::Unresolvable => { - unreachable!(); - } - ReferencedAsset::Some(asset) => { + let name = Ident::new( + namespace_ident.into(), + DUMMY_SP, + ctxt.unwrap_or_default(), + ); + let (key, mut call_expr) = match import_source { + ImportSource::Module { asset } => { let id = asset.chunk_item_id(chunking_context).await?; - let (sym, ctxt) = - ident.into_module_namespace_ident().unwrap(); - let name = Ident::new( - sym.into(), - DUMMY_SP, - ctxt.unwrap_or_default(), - ); - let mut call_expr = quote!( - "$turbopack_import($id)" as Expr, - turbopack_import: Expr = TURBOPACK_IMPORT.into(), - id: Expr = module_id_to_lit(&id), - ); - if this.is_pure_import { - call_expr.set_span(PURE_SP); - } - result.push(CodeGenerationHoistedStmt::new( - id.to_string().into(), - var_decl_with_span( - quote!( - "var $name = $call;" as Stmt, - name = name, - call: Expr = call_expr - ), - span, + // Include ctxt in the key to prevent incorrect + // deduplication when multiple merged modules import the + // same target but have different syntax contexts (which + // would cause hygiene to rename one of them). 
+ ( + format!("{} {:?}", id, ctxt).into(), + quote!( + "$turbopack_import($id)" as Expr, + turbopack_import: Expr = TURBOPACK_IMPORT.into(), + id: Expr = module_id_to_lit(&id), ), - )); + ) } - ReferencedAsset::External( + ImportSource::External { request, - ExternalType::EcmaScriptModule, - ) => { + ty: ExternalType::EcmaScriptModule, + } => { if !*chunking_context .environment() .supports_esm_externals() @@ -677,45 +727,25 @@ impl EsmAssetReference { chunking_context.name() ); } - let (sym, ctxt) = - ident.into_module_namespace_ident().unwrap(); - let name = Ident::new( - sym.into(), - DUMMY_SP, - ctxt.unwrap_or_default(), - ); - let mut call_expr = if import_externals { + let call = if import_externals { quote!( "$turbopack_external_import($id)" as Expr, turbopack_external_import: Expr = TURBOPACK_EXTERNAL_IMPORT.into(), - id: Expr = Expr::Lit(request.clone().to_string().into()) + id: Expr = Expr::Lit(request.to_string().into()) ) } else { quote!( "$turbopack_external_require($id, () => require($id), true)" as Expr, turbopack_external_require: Expr = TURBOPACK_EXTERNAL_REQUIRE.into(), - id: Expr = Expr::Lit(request.clone().to_string().into()) + id: Expr = Expr::Lit(request.to_string().into()) ) }; - if this.is_pure_import { - call_expr.set_span(PURE_SP); - } - result.push(CodeGenerationHoistedStmt::new( - name.sym.as_str().into(), - var_decl_with_span( - quote!( - "var $name = $call;" as Stmt, - name = name, - call: Expr = call_expr, - ), - span, - ), - )); + (name.sym.as_str().into(), call) } - ReferencedAsset::External( + ImportSource::External { request, - ExternalType::CommonJs | ExternalType::Url, - ) => { + ty: ExternalType::CommonJs | ExternalType::Url, + } => { if !*chunking_context .environment() .supports_commonjs_externals() @@ -727,36 +757,16 @@ impl EsmAssetReference { chunking_context.name() ); } - let (sym, ctxt) = - ident.into_module_namespace_ident().unwrap(); - let name = Ident::new( - sym.into(), - DUMMY_SP, - ctxt.unwrap_or_default(), - ); - 
let mut call_expr = quote!( + let call = quote!( "$turbopack_external_require($id, () => require($id), true)" as Expr, turbopack_external_require: Expr = TURBOPACK_EXTERNAL_REQUIRE.into(), - id: Expr = Expr::Lit(request.clone().to_string().into()) + id: Expr = Expr::Lit(request.to_string().into()) ); - if this.is_pure_import { - call_expr.set_span(PURE_SP); - } - result.push(CodeGenerationHoistedStmt::new( - name.sym.as_str().into(), - var_decl_with_span( - quote!( - "var $name = $call;" as Stmt, - name = name, - call: Expr = call_expr, - ), - span, - ), - )); + (name.sym.as_str().into(), call) } // fallback in case we introduce a new `ExternalType` #[allow(unreachable_patterns)] - ReferencedAsset::External(request, ty) => { + ImportSource::External { request, ty, .. } => { bail!( "Unsupported external type {:?} for ESM reference \ with request: {:?}", @@ -764,8 +774,21 @@ impl EsmAssetReference { request ) } - ReferencedAsset::None => {} }; + if this.is_pure_import { + call_expr.set_span(PURE_SP); + } + result.push(CodeGenerationHoistedStmt::new( + key, + var_decl_with_span( + quote!( + "var $name = $call;" as Stmt, + name = name, + call: Expr = call_expr + ), + span, + ), + )); } None => { // Nothing to import. diff --git a/turbopack/crates/turbopack-ecmascript/src/references/esm/export.rs b/turbopack/crates/turbopack-ecmascript/src/references/esm/export.rs index 636200adafa1..8353b7d08103 100644 --- a/turbopack/crates/turbopack-ecmascript/src/references/esm/export.rs +++ b/turbopack/crates/turbopack-ecmascript/src/references/esm/export.rs @@ -786,7 +786,7 @@ impl EsmExports { } } }, - ReferencedAssetIdent::Module { namespace_ident:_, ctxt:_, export:_ } => { + ReferencedAssetIdent::Module { .. } => { // Otherwise we need to bind as a getter to preserve the 'liveness' of the other modules bindings. 
// TODO: If this becomes important it might be faster to use the runtime to copy PropertyDescriptors across modules // since that would reduce allocations and optimize access. We could do this by passing the module-id up. diff --git a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/input/index.js b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/input/index.js new file mode 100644 index 000000000000..c9e6da35a5e0 --- /dev/null +++ b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/input/index.js @@ -0,0 +1 @@ +import '../../self-reexport-star/input/index.js' diff --git a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/options.json b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/options.json new file mode 100644 index 000000000000..bf30b867b721 --- /dev/null +++ b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star-no-hoisting/options.json @@ -0,0 +1,3 @@ +{ + "scopeHoisting": false +} diff --git a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/data.js b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/data.js index 6a5bb384aaf0..494b407b572b 100644 --- a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/data.js +++ b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/data.js @@ -1,3 +1,5 @@ +import * as Self from './data' + export function foo() { return 'foo' } @@ -6,4 +8,18 @@ export function bar() { return 'bar' } +// Call exports through the module's own namespace object. 
+export function fooViaSelf() { + return Self.foo() +} + +export function barViaSelf() { + return Self.bar() +} + +// Access the re-exported self-namespace via the self-namespace itself. +export function nestedSelfFoo() { + return Self.Data.foo() +} + export * as Data from './data' diff --git a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/index.js b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/index.js index 310eae820a69..fd1c5f6a7fd1 100644 --- a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/index.js +++ b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/index.js @@ -1,6 +1,47 @@ -import { Data } from './data' +import { Data, foo, bar, fooViaSelf, barViaSelf, nestedSelfFoo } from './data' +import * as DataNs from './data' +import { + Data as ChainedData, + foo as chainedFoo, + bar as chainedBar, +} from './reexport' it('should re-export own namespace correctly', () => { expect(Data.foo()).toBe('foo') expect(Data.bar()).toBe('bar') }) + +it('should expose named exports alongside the self-namespace', () => { + expect(foo()).toBe('foo') + expect(bar()).toBe('bar') +}) + +it('should allow using the self-namespace from inside the module', () => { + expect(fooViaSelf()).toBe('foo') + expect(barViaSelf()).toBe('bar') + expect(nestedSelfFoo()).toBe('foo') +}) + +it('should let the self-namespace reference itself recursively', () => { + expect(Data.Data.foo()).toBe('foo') + expect(Data.Data.Data.bar()).toBe('bar') +}) + +it('should expose the same self-namespace via a star-namespace import', () => { + expect(DataNs.Data).toBe(Data) + expect(DataNs.Data.foo).toBe(foo) +}) + +it('should re-export the self-namespace through a chained module', () => { + expect(ChainedData).toBe(Data) + expect(ChainedData.foo()).toBe('foo') + expect(ChainedData.bar()).toBe('bar') + expect(chainedFoo).toBe(foo) + 
expect(chainedBar).toBe(bar) +}) + +it('should enumerate all expected keys on the self-namespace', () => { + expect(Object.keys(Data).sort()).toEqual( + ['Data', 'bar', 'barViaSelf', 'foo', 'fooViaSelf', 'nestedSelfFoo'].sort() + ) +}) diff --git a/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/reexport.js b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/reexport.js new file mode 100644 index 000000000000..df98d86fb47a --- /dev/null +++ b/turbopack/crates/turbopack-tests/tests/execution/turbopack/exports/self-reexport-star/input/reexport.js @@ -0,0 +1,2 @@ +export { Data } from './data' +export * from './data' From 908af50147e7af2d31134ecf3956bde8b2b2c3c3 Mon Sep 17 00:00:00 2001 From: Zack Tanner <1939140+ztanner@users.noreply.github.com> Date: Tue, 28 Apr 2026 12:55:55 -0700 Subject: [PATCH 03/15] [ci]: Use macOS arm runner for intel builds (#93330) The intel runners were quite a bit slower than the base arm runners. We were previously compiling both architectures on self-hosted arm so this just brings back the status quo on GH runners to improve speed. This shaves off ~10/20 mins.
Validated [here](https://github.com/vercel/next.js/actions/runs/25071926790/job/73454315459) --- .github/workflows/build_and_deploy.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_and_deploy.yml b/.github/workflows/build_and_deploy.yml index b7d1d535158e..9a0e68f09203 100644 --- a/.github/workflows/build_and_deploy.yml +++ b/.github/workflows/build_and_deploy.yml @@ -183,13 +183,10 @@ jobs: build_task: build-native-release os: mac: + host: "['macos-15']" vendor: apple sys: darwin - arch: - x86_64: - host: "['macos-15-intel']" - aarch64: - host: "['macos-15']" + arch: [x86_64, aarch64] windows: host: "['windows-latest-8-core-oss']" vendor: pc @@ -226,7 +223,7 @@ jobs: name: stable - ${{ matrix.target }} - node@20 runs-on: ${{ fromJSON(matrix.host) }} - timeout-minutes: ${{ matrix.os == 'mac' && 90 || 45 }} + timeout-minutes: 45 env: # Disable all build caches for production/staging/force-preview deploys NEXT_SKIP_BUILD_CACHE: ${{ contains(fromJSON('["production","staging","force-preview"]'), needs.deploy-target.outputs.value) && '1' || '' }} From b694eeb590344465594d44f2178a1c6da379c805 Mon Sep 17 00:00:00 2001 From: Niklas Mischkulnig <4586894+mischnic@users.noreply.github.com> Date: Tue, 28 Apr 2026 22:21:36 +0200 Subject: [PATCH 04/15] Turbopack: only compute module ids for included modules (#93276) Currently doesn't have a measureable performance impact. Previously we assigned module ids for all nodes in the graph, even the subgraphs we detect as unused. They weren't chunked already, but the codegen that references those wasn't gracefully handling `.chunk_item_id(m)` failing (where `m` is a unused skipped module) So `.iter_nodes()` should basically be deprecated. It doesn't respect `removed_unused_imports` and the side-effect-free inference. 
And this would get much much worse once NFT is on the module graph, because then there are way more modules that should be skipped via the same mechanism This manifested as the following error after switching from `.iter_nodes` to `.traverse_edges_unordered` in global_module_ids.rs ``` Debug info: - An error occurred while generating the chunk item [project]/node_modules/.pnpm/next@file+..+next.js+packages+next_@babel+core@7.29.0_@opentelemetry+api@1.7.0_@playwri_9a8ea08672cbf59c2720abbb0c8879be/node_modules/next/dist/esm/server/stream-utils/node-web-streams-helper.js [app-rsc] (ecmascript) - Execution of ::chunk_item_content failed - Execution of *EcmascriptChunkItemContent::new failed - Execution of EcmascriptModuleContent::new_merged failed - ModuleId not found for ident: [project]/node_modules/.pnpm/next@file+..+next.js+packages+next_@babel+core@7.29.0_@opentelemetry+api@1.7.0_@playwri_9a8ea08672cbf59c2720abbb0c8879be/node_modules/next/dist/esm/shared/lib/hash.js [app-rsc] (ecmascript) ``` --- .../src/module_graph/binding_usage_info.rs | 16 ++++++++ .../src/module_graph/merged_modules.rs | 5 +-- .../turbopack-core/src/module_graph/mod.rs | 4 +- .../module_graph/side_effect_module_info.rs | 38 +++++++++++++++++++ .../src/references/esm/binding.rs | 8 ++++ ...-unused-imports_exports_input_07rt4kf._.js | 4 +- ...sed-imports_exports_input_07rt4kf._.js.map | 4 +- .../crates/turbopack/src/global_module_ids.rs | 18 ++++----- 8 files changed, 77 insertions(+), 20 deletions(-) diff --git a/turbopack/crates/turbopack-core/src/module_graph/binding_usage_info.rs b/turbopack/crates/turbopack-core/src/module_graph/binding_usage_info.rs index a6892cafca75..00e0eb879a5b 100644 --- a/turbopack/crates/turbopack-core/src/module_graph/binding_usage_info.rs +++ b/turbopack/crates/turbopack-core/src/module_graph/binding_usage_info.rs @@ -292,6 +292,22 @@ pub async fn compute_binding_usage_info( .await? 
); } + + static PRINT_USED_EXPORTS: Lazy = Lazy::new(|| { + std::env::var_os("TURBOPACK_PRINT_USED_EXPORTS") + .is_some_and(|v| v == "1" || v == "true") + }); + if *PRINT_USED_EXPORTS { + use turbo_tasks::TryJoinIterExt; + println!( + "used exports: {:#?}", + used_exports + .iter() + .map(async |(m, v)| Ok((m.ident_string().await?, v,))) + .try_join() + .await? + ); + } } Ok(BindingUsageInfo { diff --git a/turbopack/crates/turbopack-core/src/module_graph/merged_modules.rs b/turbopack/crates/turbopack-core/src/module_graph/merged_modules.rs index 70e736d38e5d..c841342b6076 100644 --- a/turbopack/crates/turbopack-core/src/module_graph/merged_modules.rs +++ b/turbopack/crates/turbopack-core/src/module_graph/merged_modules.rs @@ -139,9 +139,8 @@ pub async fn compute_merged_modules(module_graph: Vc) -> Result>(module) diff --git a/turbopack/crates/turbopack-core/src/module_graph/mod.rs b/turbopack/crates/turbopack-core/src/module_graph/mod.rs index 85d37134e425..49b415ac6b16 100644 --- a/turbopack/crates/turbopack-core/src/module_graph/mod.rs +++ b/turbopack/crates/turbopack-core/src/module_graph/mod.rs @@ -981,9 +981,7 @@ impl ModuleGraphSnapshot { /// This is primarily useful for debugging. pub async fn get_ids(&self) -> Result>, ReadRef>> { Ok(self - .graphs - .iter() - .flat_map(|g| g.iter_nodes()) + .iter_nodes() .map(async |n| Ok((n, n.ident().to_string().await?))) .try_join() .await? 
diff --git a/turbopack/crates/turbopack-core/src/module_graph/side_effect_module_info.rs b/turbopack/crates/turbopack-core/src/module_graph/side_effect_module_info.rs index 2034fbdd24ec..8a01210bdc98 100644 --- a/turbopack/crates/turbopack-core/src/module_graph/side_effect_module_info.rs +++ b/turbopack/crates/turbopack-core/src/module_graph/side_effect_module_info.rs @@ -105,6 +105,44 @@ async fn compute_side_effect_free_module_info_single( }, |_, _, _| Ok(()), )?; + #[cfg(debug_assertions)] + { + use once_cell::sync::Lazy; + static PRINT_SIDE_EFFECT_INFO: Lazy = Lazy::new(|| { + std::env::var_os("TURBOPACK_PRINT_SIDE_EFFECT_INFO") + .is_some_and(|v| v == "1" || v == "true") + }); + if *PRINT_SIDE_EFFECT_INFO { + use turbo_tasks::TryJoinIterExt; + println!( + "side effect free modules: {:#?}", + module_side_effects + .iter() + .filter_map(|(m, e)| match e { + ModuleSideEffects::SideEffectful => None, + ModuleSideEffects::SideEffectFree => Some((m, false)), + ModuleSideEffects::ModuleEvaluationIsSideEffectFree => { + if locally_side_effect_free_modules_that_have_side_effects.contains(m) { + None + } else { + Some((m, true)) + } + } + }) + .map(async |(m, locally)| Ok(( + m.ident_string().await?, + if locally { + "inferred locally" + } else { + "declared" + } + ))) + .try_join() + .await? + ); + } + } + let side_effect_free_modules = module_side_effects .into_iter() .filter_map(|(m, e)| match e { diff --git a/turbopack/crates/turbopack-ecmascript/src/references/esm/binding.rs b/turbopack/crates/turbopack-ecmascript/src/references/esm/binding.rs index 79ffaa1e4da8..c01fe3ab774d 100644 --- a/turbopack/crates/turbopack-ecmascript/src/references/esm/binding.rs +++ b/turbopack/crates/turbopack-ecmascript/src/references/esm/binding.rs @@ -62,6 +62,14 @@ impl EsmBinding { chunking_context: Vc>, scope_hoisting_context: ScopeHoistingContext<'_>, ) -> Result { + if chunking_context + .unused_references() + .contains_key(&ResolvedVc::upcast(self.reference)) + .await? 
+ { + return Ok(CodeGeneration::empty()); + } + let mut visitors = vec![]; let export = self.export.clone(); diff --git a/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js b/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js index 4bc13995ce08..a58c2b7db465 100644 --- a/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js +++ b/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js @@ -16,7 +16,7 @@ __turbopack_context__.s([ var __TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbopack$2d$tests$2f$tests$2f$snapshot$2f$remove$2d$unused$2d$imports$2f$exports$2f$input$2f$library$2f$leaf$2e$js__$5b$test$5d$__$28$ecmascript$29$__ = __turbopack_context__.i("[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/library/leaf.js [test] (ecmascript)"); ; function sharedX() { - (0, __TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbopack$2d$tests$2f$tests$2f$snapshot$2f$remove$2d$unused$2d$imports$2f$exports$2f$input$2f$library$2f$leaf$2e$js__$5b$test$5d$__$28$ecmascript$29$__["leafX"])(); + leafX(); } function sharedY() { (0, __TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbopack$2d$tests$2f$tests$2f$snapshot$2f$remove$2d$unused$2d$imports$2f$exports$2f$input$2f$library$2f$leaf$2e$js__$5b$test$5d$__$28$ecmascript$29$__["leafY"])(); @@ -50,7 +50,7 @@ var __TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbo ; (0, 
__TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbopack$2d$tests$2f$tests$2f$snapshot$2f$remove$2d$unused$2d$imports$2f$exports$2f$input$2f$library$2f$y$2e$js__$5b$test$5d$__$28$ecmascript$29$__["y"])(); function helper() { - return (0, __TURBOPACK__imported__module__$5b$project$5d2f$turbopack$2f$crates$2f$turbopack$2d$tests$2f$tests$2f$snapshot$2f$remove$2d$unused$2d$imports$2f$exports$2f$input$2f$library$2f$x$2e$js__$5b$test$5d$__$28$ecmascript$29$__["x"])(); + return x(); } function unused() { return helper(); diff --git a/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js.map b/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js.map index bfbc3db431e1..e9344ecff0b8 100644 --- a/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js.map +++ b/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/output/0_9x_turbopack-tests_tests_snapshot_remove-unused-imports_exports_input_07rt4kf._.js.map @@ -3,8 +3,8 @@ "sources": [], "sections": [ {"offset": {"line": 4, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/library/leaf.js"],"sourcesContent":["export function leafX() {}\nexport function leafY() {}\n"],"names":["leafX","leafY"],"mappings":"AAAO,SAASA,SAAS;AAClB,SAASC,SAAS"}}, - {"offset": {"line": 15, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/library/shared.js"],"sourcesContent":["import { leafX, leafY } from './leaf'\n\nexport function sharedX() {\n 
leafX()\n}\nexport function sharedY() {\n leafY()\n}\n"],"names":["sharedX","sharedY"],"mappings":"AAAA;;AAEO,SAASA;IACd,IAAA,uOAAK;AACP;AACO,SAASC;IACd,IAAA,uOAAK;AACP"}}, + {"offset": {"line": 15, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/library/shared.js"],"sourcesContent":["import { leafX, leafY } from './leaf'\n\nexport function sharedX() {\n leafX()\n}\nexport function sharedY() {\n leafY()\n}\n"],"names":["sharedX","leafX","sharedY"],"mappings":"AAAA;;AAEO,SAASA;IACdC;AACF;AACO,SAASC;IACd,IAAA,uOAAK;AACP"}}, {"offset": {"line": 32, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/library/y.js"],"sourcesContent":["import { sharedY } from './shared'\n\nglobalThis.yBundled = true\n\nexport function y() {\n sharedY()\n}\n"],"names":["globalThis","yBundled","y"],"mappings":"AAAA;;AAEAA,WAAWC,QAAQ,GAAG;AAEf,SAASC;IACd,IAAA,2OAAO;AACT"}}, - {"offset": {"line": 47, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/a.js"],"sourcesContent":["import { x } from './library/x.js'\nimport { y } from './library/y.js'\n\ny()\n\nfunction helper() {\n return x()\n}\n\nexport function unused() {\n return helper()\n}\n\nexport function used() {\n return 1234\n}\n"],"names":["helper","unused","used"],"mappings":"AACA;;;AAEA,IAAA,gOAAC;AAED,SAASA;IACP,OAAO,IAAA,gOAAC;AACV;AAEO,SAASC;IACd,OAAOD;AACT;AAEO,SAASE;IACd,OAAO;AACT"}}, + {"offset": {"line": 47, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/a.js"],"sourcesContent":["import { x } from './library/x.js'\nimport { y } from './library/y.js'\n\ny()\n\nfunction helper() {\n return x()\n}\n\nexport function 
unused() {\n return helper()\n}\n\nexport function used() {\n return 1234\n}\n"],"names":["helper","x","unused","used"],"mappings":"AACA;;;AAEA,IAAA,gOAAC;AAED,SAASA;IACP,OAAOC;AACT;AAEO,SAASC;IACd,OAAOF;AACT;AAEO,SAASG;IACd,OAAO;AACT"}}, {"offset": {"line": 69, "column": 0}, "map": {"version":3,"sources":["turbopack:///[project]/turbopack/crates/turbopack-tests/tests/snapshot/remove-unused-imports/exports/input/index.js"],"sourcesContent":["import { used } from './a.js'\n\nconsole.log(used)\n"],"names":["console","log"],"mappings":"AAAA;;AAEAA,QAAQC,GAAG,CAAC,wNAAI"}}] } \ No newline at end of file diff --git a/turbopack/crates/turbopack/src/global_module_ids.rs b/turbopack/crates/turbopack/src/global_module_ids.rs index 58b9dc287b2f..7590a40394de 100644 --- a/turbopack/crates/turbopack/src/global_module_ids.rs +++ b/turbopack/crates/turbopack/src/global_module_ids.rs @@ -1,5 +1,5 @@ use anyhow::{Context, Result, bail}; -use rustc_hash::FxHashMap; +use rustc_hash::{FxHashMap, FxHashSet}; use smallvec::SmallVec; use tracing::Instrument; use turbo_rcstr::RcStr; @@ -23,17 +23,13 @@ pub async fn get_global_module_id_strategy( let span = tracing::info_span!("compute module id map"); async move { let module_graph = module_graph.await?; - let graphs = &module_graph.graphs; - // All modules in the graph - let module_idents = graphs - .iter() - .flat_map(|graph| graph.iter_nodes()) - .map(|m| m.ident()); - - // And additionally, all the modules that are inserted by chunking (i.e. async loaders) + // All modules in the graph and additionally, all the modules that are inserted by chunking + // (i.e. 
async loaders) + let mut modules = FxHashSet::default(); let mut async_idents = vec![]; module_graph.traverse_edges_unordered(|parent, current| { + modules.insert(current); if let Some(( _, &RefData { @@ -49,7 +45,9 @@ pub async fn get_global_module_id_strategy( Ok(()) })?; - let mut module_id_map = module_idents + let mut module_id_map = modules + .into_iter() + .map(|m| m.ident()) .chain(async_idents.into_iter()) .map(|ident| async move { let ident = ident.to_resolved().await?; From f5e54c06726b571a042fce67417e40a29f6b8689 Mon Sep 17 00:00:00 2001 From: Hendrik Liebau Date: Tue, 28 Apr 2026 22:31:44 +0200 Subject: [PATCH 05/15] Forward invalid dynamic usage errors on client-side navigations (#93184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In dev mode, the static shell validation only runs during the initial page load and HMR refreshes, not during client-side navigations. During a client-side navigation an invalid dynamic usage error — for example a `'use cache'` fill timeout, or a request API like `cookies()` called inside a `'use cache'` scope — does still reach the browser via the errored `'use cache'` stream. But when the cache invocation is wrapped in a user-space `try`/`catch`, the error is caught and swallowed in user code, and because the static shell validation isn't running to surface it separately, the error is then neither sent to the browser nor logged to the terminal. When we skip the validation, we now wait for the full render stream to finish and then, if an error was recorded during the render, send it to the browser and log it to the terminal. Waiting for the stream to finish mirrors what the static shell validation already does, and ensures we also catch errors from `'use cache'` invocations that only run in the dynamic stage. 
--- .../next/src/server/app-render/app-render.tsx | 17 +++++ .../app-dir/use-cache-hanging/app/layout.tsx | 14 +++- .../app-dir/use-cache-hanging/app/page.tsx | 3 + .../use-cache-hanging.test.ts | 71 ++++++++++++++++++- 4 files changed, 102 insertions(+), 3 deletions(-) create mode 100644 test/e2e/app-dir/use-cache-hanging/app/page.tsx diff --git a/packages/next/src/server/app-render/app-render.tsx b/packages/next/src/server/app-render/app-render.tsx index e85cf161ad0c..103c28a69ca1 100644 --- a/packages/next/src/server/app-render/app-render.tsx +++ b/packages/next/src/server/app-render/app-render.tsx @@ -1575,6 +1575,23 @@ async function generateDynamicFlightRenderResultWithStagesInDev( ) } else { logValidationSkipped(ctx) + + // The main render may record an invalid dynamic usage error on the work + // store, e.g. a `'use cache'` fill timeout, or a request API like + // `cookies()` being called inside a `'use cache'` scope. Even when we + // don't spawn the static shell validation, surface any recorded error + // through the dev overlay so client-side navigations see the same error + // as initial HTML loads, where the spawned validation forwards it. Wait + // for the full stream to finish accumulating so we also catch errors from + // the dynamic stage. 
+ void accumulatedChunksPromise.then(() => { + if (workStore.invalidDynamicUsageError) { + logMessagesAndSendErrorsToBrowser( + [workStore.invalidDynamicUsageError], + ctx + ) + } + }) } debugChannel = returnedDebugChannel diff --git a/test/e2e/app-dir/use-cache-hanging/app/layout.tsx b/test/e2e/app-dir/use-cache-hanging/app/layout.tsx index 888614deda3b..af3f9cfa3913 100644 --- a/test/e2e/app-dir/use-cache-hanging/app/layout.tsx +++ b/test/e2e/app-dir/use-cache-hanging/app/layout.tsx @@ -1,8 +1,20 @@ +import Link from 'next/link' import { ReactNode } from 'react' export default function Root({ children }: { children: ReactNode }) { return ( - {children} + + + {children} + ) } diff --git a/test/e2e/app-dir/use-cache-hanging/app/page.tsx b/test/e2e/app-dir/use-cache-hanging/app/page.tsx new file mode 100644 index 000000000000..7a402d64650f --- /dev/null +++ b/test/e2e/app-dir/use-cache-hanging/app/page.tsx @@ -0,0 +1,3 @@ +export default function Page() { + return

Home

+} diff --git a/test/e2e/app-dir/use-cache-hanging/use-cache-hanging.test.ts b/test/e2e/app-dir/use-cache-hanging/use-cache-hanging.test.ts index 74b811cbac35..d52767eef0f8 100644 --- a/test/e2e/app-dir/use-cache-hanging/use-cache-hanging.test.ts +++ b/test/e2e/app-dir/use-cache-hanging/use-cache-hanging.test.ts @@ -1,4 +1,5 @@ import { nextTestSetup } from 'e2e-utils' +import { retry } from 'next-test-utils' import stripAnsi from 'strip-ansi' const expectedTimeoutErrorMessage = @@ -17,7 +18,7 @@ describe('use-cache-hanging', () => { if (isNextDev) { describe('when a "use cache" fill hangs in the static stage', () => { - it('should show an error toast after a timeout', async () => { + it('should show an error after a timeout', async () => { const outputIndex = next.cliOutput.length const browser = await next.browser('/static') @@ -46,7 +47,7 @@ describe('use-cache-hanging', () => { }) describe('when a "use cache" fill hangs in the runtime stage', () => { - it('should show an error toast after a timeout', async () => { + it('should show an error after a timeout', async () => { const outputIndex = next.cliOutput.length const browser = await next.browser('/runtime') @@ -74,6 +75,72 @@ describe('use-cache-hanging', () => { }) }) + describe('when navigating to the static page', () => { + it('should show an error toast after a timeout', async () => { + const outputIndex = next.cliOutput.length + const browser = await next.browser('/') + + await browser.elementByCss('a[href="/static"]').click() + + await retry(() => { + const cliOutput = stripAnsi(next.cliOutput.slice(outputIndex)) + + expect(cliOutput).toContain(`Error: ${expectedTimeoutErrorMessage} + at getCachedData (app/static/page.tsx:6:1)`) + }, 20_000) + + await expect(browser).toDisplayCollapsedRedbox(` + { + "code": "E236", + "description": "Filling a cache during prerender timed out, likely because request-specific arguments such as params, searchParams, cookies() or dynamic data were used inside "use cache".", 
+ "environmentLabel": "Server", + "label": "Console Error", + "source": "app/static/page.tsx (6:1) @ getCachedData + > 6 | async function getCachedData(): Promise { + | ^", + "stack": [ + "getCachedData app/static/page.tsx (6:1)", + "Cached app/static/page.tsx (18:24)", + "Page app/static/page.tsx (32:10)", + ], + } + `) + }) + }) + + describe('when navigating to the runtime page', () => { + it('should show an error toast after a timeout', async () => { + const outputIndex = next.cliOutput.length + const browser = await next.browser('/') + + await browser.elementByCss('a[href="/runtime"]').click() + + await retry(() => { + const cliOutput = stripAnsi(next.cliOutput.slice(outputIndex)) + + expect(cliOutput).toContain(`Error: ${expectedTimeoutErrorMessage} + at getCachedData (app/runtime/page.tsx:8:1)`) + }, 20_000) + + await expect(browser).toDisplayCollapsedRedbox(` + { + "code": "E236", + "description": "Filling a cache during prerender timed out, likely because request-specific arguments such as params, searchParams, cookies() or dynamic data were used inside "use cache".", + "environmentLabel": "Server", + "label": "Console Error", + "source": "app/runtime/page.tsx (8:1) @ getCachedData + > 8 | async function getCachedData(): Promise { + | ^", + "stack": [ + "getCachedData app/runtime/page.tsx (8:1)", + "Cached app/runtime/page.tsx (20:24)", + "Page app/runtime/page.tsx (42:7)", + ], + } + `) + }) + }) + describe('when a "use cache" performs long-running I/O in the dynamic stage', () => { it('should not time out', async () => { const outputIndex = next.cliOutput.length From 6ca4390a4557f4f118b265d7042623dce3280eb1 Mon Sep 17 00:00:00 2001 From: Will Binns-Smith Date: Tue, 28 Apr 2026 13:40:39 -0700 Subject: [PATCH 06/15] HMR: debounce status indicator transitions to prevent flicker during bursts of changes (#93229) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a debounce to the _ui_ in development, reducing 
churn and flicker from the Next.js logo indicator's "Compiling" and "Rendering" messages. This is holistic and covers any rapid changes in dev, usually caused by agents making a series of changes. **There's currently a debounce of 30ms coming from the subscription to changes in Turbopack itself, we could consider lengthening that as well.** This commit adds a useDebouncedValue hook and uses it to debounce status transitions in NextLogo, so rapid active->active alternations (e.g. Compiling→Rendering→Compiling) are smoothed into a single stable state for 300ms. Transitions to/from None remain immediate so: - Fast single builds that complete before the 400ms enterDelay still never show the pill. - The pill still appears promptly when a long build starts (no added latency on top of the existing enter delay). Changes: - use-debounced-value.ts: new generic trailing-edge debounce hook with an optional leading predicate to bypass the delay for specific transitions. - next-logo.tsx: apply the debounce to the computed status; derive shouldShowStatus from the debounced value so both pill mount-gating and label transitions are smoothed together. Test Plan: Ran a script that simulated an agent making hundreds of changes in rapid succession. Verified that before this change, the indicator would flash between Compiling and Rendering rapidly. Now, it consistently stays on "Compiling" before the final render. 
--- .../devtools-indicator/next-logo.tsx | 38 +++-- .../hooks/use-debounced-value.test.ts | 159 ++++++++++++++++++ .../dev-overlay/hooks/use-debounced-value.ts | 49 ++++++ 3 files changed, 236 insertions(+), 10 deletions(-) create mode 100644 packages/next/src/next-devtools/dev-overlay/hooks/use-debounced-value.test.ts create mode 100644 packages/next/src/next-devtools/dev-overlay/hooks/use-debounced-value.ts diff --git a/packages/next/src/next-devtools/dev-overlay/components/devtools-indicator/next-logo.tsx b/packages/next/src/next-devtools/dev-overlay/components/devtools-indicator/next-logo.tsx index d12da9fb58e3..11a9c1a55df5 100644 --- a/packages/next/src/next-devtools/dev-overlay/components/devtools-indicator/next-logo.tsx +++ b/packages/next/src/next-devtools/dev-overlay/components/devtools-indicator/next-logo.tsx @@ -7,6 +7,7 @@ import { css } from '../../utils/css' import { useDevOverlayContext } from '../../../dev-overlay.browser' import { useRenderErrorContext } from '../../dev-overlay' import { useDelayedRender } from '../../hooks/use-delayed-render' +import { useDebouncedValue } from '../../hooks/use-debounced-value' import { ACTION_ERROR_OVERLAY_CLOSE, ACTION_ERROR_OVERLAY_OPEN, @@ -17,6 +18,12 @@ import { StatusIndicator, Status, getCurrentStatus } from './status-indicator' const SHORT_DURATION_MS = 150 +// Smooth out rapid status transitions driven by bursty HMR events (e.g. +// Compiling→None→Compiling when consecutive compile episodes are <300ms apart, +// or Compiling→Rendering→Compiling oscillation). The debounce bridges burst +// gaps and active↔active flicker. +const STATUS_DEBOUNCE_MS = 300 + export function NextLogo({ onTriggerClick, ...buttonProps @@ -42,12 +49,30 @@ export function NextLogo({ ) // Cache indicator state management - const isCacheFilling = state.cacheIndicator === 'filling' const isCacheBypassing = state.cacheIndicator === 'bypass' + // Get the current status from the state. Debounce active↔active transitions + // (e.g. 
Compiling→Rendering) so bursts of HMR events don't flicker the + // badge. Transitions to None are committed immediately so fast single builds + // cancel the enter timer before the pill becomes visible. + const currentStatus = useDebouncedValue( + getCurrentStatus( + state.buildingIndicator, + state.renderingIndicator, + state.cacheIndicator + ), + STATUS_DEBOUNCE_MS, + { + // Only None→active is immediate (no debounce on top of enterDelay). + // active→None is debounced so short inter-burst gaps don't prematurely + // collapse the pill. Fast single builds (completing in