From f5d6b26fb36907523df754df8d86b87e97bed4e1 Mon Sep 17 00:00:00 2001 From: Tobias Koppers Date: Thu, 10 Oct 2024 17:47:47 +0200 Subject: [PATCH 1/8] [Turbopack] add support for collectibles to new backend (#70798) ### What? add support for collectibles to new backend also fix some bugs in the backend: * mark in-progress tasks as stale * skip updating output for stale tasks * flag tasks as dirty when their output is set Also add and enable all remaining tests --- .../next/src/build/swc/generated-native.d.ts | 1 + .../turbo-tasks-auto-hash-map/src/map.rs | 8 + .../turbo-tasks-backend/src/backend/mod.rs | 202 +++++++++++++++--- .../backend/operation/aggregation_update.rs | 114 ++++++++-- .../backend/operation/cleanup_old_edges.rs | 69 +++++- .../src/backend/operation/invalidate.rs | 24 ++- .../src/backend/operation/mod.rs | 10 +- .../backend/operation/update_collectible.rs | 56 +++++ .../src/backend/operation/update_output.rs | 98 ++++++++- .../src/backend/storage.rs | 45 +++- .../crates/turbo-tasks-backend/src/data.rs | 85 ++++++-- .../turbo-tasks-backend/tests/collectibles.rs | 1 + .../tests/dirty_in_progress.rs | 1 + .../tests/recompute_collectibles.rs | 1 + .../tests/dirty_in_progress.rs | 1 + 15 files changed, 634 insertions(+), 82 deletions(-) create mode 100644 turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs create mode 120000 turbopack/crates/turbo-tasks-backend/tests/collectibles.rs create mode 120000 turbopack/crates/turbo-tasks-backend/tests/dirty_in_progress.rs create mode 120000 turbopack/crates/turbo-tasks-backend/tests/recompute_collectibles.rs diff --git a/packages/next/src/build/swc/generated-native.d.ts b/packages/next/src/build/swc/generated-native.d.ts index c7f991907a4ef..7c3a7b7614b98 100644 --- a/packages/next/src/build/swc/generated-native.d.ts +++ b/packages/next/src/build/swc/generated-native.d.ts @@ -249,6 +249,7 @@ export interface NapiUpdateInfo { } /** * Subscribes to lifecycle events of the compilation. 
+ * * Emits an [UpdateMessage::Start] event when any computation starts. * Emits an [UpdateMessage::End] event when there was no computation for the * specified time (`aggregation_ms`). The [UpdateMessage::End] event contains diff --git a/turbopack/crates/turbo-tasks-auto-hash-map/src/map.rs b/turbopack/crates/turbo-tasks-auto-hash-map/src/map.rs index 595a16b69e82c..03ff1982d0166 100644 --- a/turbopack/crates/turbo-tasks-auto-hash-map/src/map.rs +++ b/turbopack/crates/turbo-tasks-auto-hash-map/src/map.rs @@ -547,6 +547,14 @@ impl<'a, K: Eq + Hash, V, H: BuildHasher + Default + 'a, const I: usize> Entry<' Entry::Vacant(entry) => entry.insert(default()), } } + + /// see [HashMap::Entry::or_insert](https://doc.rust-lang.org/std/collections/hash_map/enum.Entry.html#method.or_insert) + pub fn or_insert(self, default: V) -> &'a mut V { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default), + } + } } impl<'a, K: Eq + Hash, V: Default, H: BuildHasher + Default + 'a, const I: usize> diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs index b7a2fdfc0a337..b7f6199ca0299 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs @@ -43,13 +43,13 @@ use crate::{ AggregationUpdateQueue, CleanupOldEdgesOperation, ConnectChildOperation, ExecuteContext, OutdatedEdge, }, - storage::{get, get_many, remove, Storage}, + storage::{get, get_many, iter_many, remove, Storage}, }, backing_storage::{BackingStorage, ReadTransaction}, data::{ ActiveType, AggregationNumber, CachedDataItem, CachedDataItemIndex, CachedDataItemKey, - CachedDataItemValue, CachedDataUpdate, CellRef, InProgressCellState, InProgressState, - OutputValue, RootState, + CachedDataItemValue, CachedDataUpdate, CellRef, CollectibleRef, CollectiblesRef, + InProgressCellState, InProgressState, OutputValue, RootState, }, 
utils::{bi_map::BiMap, chunked_vec::ChunkedVec, ptr_eq_arc::PtrEqArc, sharded::Sharded}, }; @@ -361,7 +361,7 @@ impl TurboTasksBackendInner { // active and this task won't stale. CachedActiveUntilClean // is automatically removed when this task is clean. task.add_new(CachedDataItem::AggregateRoot { - value: RootState::new(ActiveType::CachedActiveUntilClean), + value: RootState::new(ActiveType::CachedActiveUntilClean, task_id), }); get!(task, AggregateRoot).unwrap() }; @@ -845,6 +845,38 @@ impl TurboTasksBackendInner { } } + // Make all current collectibles outdated (remove left-over outdated collectibles) + enum Collectible { + Current(CollectibleRef, i32), + Outdated(CollectibleRef), + } + let collectibles = task + .iter(CachedDataItemIndex::Collectibles) + .filter_map(|(key, value)| match (key, value) { + ( + &CachedDataItemKey::Collectible { collectible }, + &CachedDataItemValue::Collectible { value }, + ) => Some(Collectible::Current(collectible, value)), + (&CachedDataItemKey::OutdatedCollectible { collectible }, _) => { + Some(Collectible::Outdated(collectible)) + } + _ => None, + }) + .collect::>(); + for collectible in collectibles { + match collectible { + Collectible::Current(collectible, value) => { + let _ = + task.insert(CachedDataItem::OutdatedCollectible { collectible, value }); + } + Collectible::Outdated(collectible) => { + if !task.has_key(&CachedDataItemKey::Collectible { collectible }) { + task.remove(&CachedDataItemKey::OutdatedCollectible { collectible }); + } + } + } + } + // Make all dependencies outdated enum Dep { CurrentCell(CellRef), @@ -898,8 +930,6 @@ impl TurboTasksBackendInner { } } } - - // TODO: Make all collectibles outdated } let (span, future) = match task_type { @@ -1096,16 +1126,25 @@ impl TurboTasksBackendInner { .collect::>() } else { task.iter_all() - .filter_map(|(key, _)| match *key { + .filter_map(|(key, value)| match *key { CachedDataItemKey::OutdatedChild { task } => { Some(OutdatedEdge::Child(task)) } + 
CachedDataItemKey::OutdatedCollectible { collectible } => { + let CachedDataItemValue::OutdatedCollectible { value } = *value else { + unreachable!(); + }; + Some(OutdatedEdge::Collectible(collectible, value)) + } CachedDataItemKey::OutdatedCellDependency { target } => { Some(OutdatedEdge::CellDependency(target)) } CachedDataItemKey::OutdatedOutputDependency { target } => { Some(OutdatedEdge::OutputDependency(target)) } + CachedDataItemKey::OutdatedCollectiblesDependency { target } => { + Some(OutdatedEdge::CollectiblesDependency(target)) + } CachedDataItemKey::CellDependent { cell, task } if removed_cells .get(&cell.type_id) @@ -1132,7 +1171,7 @@ impl TurboTasksBackendInner { } AggregationUpdateJob::data_update( &mut task, - AggregatedDataUpdate::no_longer_dirty_container(task_id), + AggregatedDataUpdate::new().no_longer_dirty_container(task_id), ) } else { None @@ -1246,6 +1285,116 @@ impl TurboTasksBackendInner { } } + fn read_task_collectibles( + &self, + task_id: TaskId, + collectible_type: TraitTypeId, + reader_id: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, + ) -> AutoMap, 1> { + let mut ctx = self.execute_context(turbo_tasks); + let mut collectibles = AutoMap::default(); + { + let mut task = ctx.task(task_id, TaskDataCategory::Data); + // Ensure it's an root node + loop { + let aggregation_number = get_aggregation_number(&task); + if is_root_node(aggregation_number) { + break; + } + drop(task); + AggregationUpdateQueue::run( + AggregationUpdateJob::UpdateAggregationNumber { + task_id, + base_aggregation_number: u32::MAX, + distance: None, + }, + &mut ctx, + ); + task = ctx.task(task_id, TaskDataCategory::All); + } + for collectible in iter_many!(task, AggregatedCollectible { collectible } count if collectible.collectible_type == collectible_type && count > 0 => collectible.cell) + { + *collectibles + .entry(RawVc::TaskCell(collectible.task, collectible.cell)) + .or_insert(0) += 1; + } + for (collectible, count) in iter_many!(task, Collectible { 
collectible } count if collectible.collectible_type == collectible_type => (collectible.cell, count)) + { + *collectibles + .entry(RawVc::TaskCell(collectible.task, collectible.cell)) + .or_insert(0) += count; + } + task.insert(CachedDataItem::CollectiblesDependent { + collectible_type, + task: reader_id, + value: (), + }); + } + { + let mut reader = ctx.task(reader_id, TaskDataCategory::Data); + let target = CollectiblesRef { + task: task_id, + collectible_type, + }; + if reader.add(CachedDataItem::CollectiblesDependency { target, value: () }) { + reader.remove(&CachedDataItemKey::OutdatedCollectiblesDependency { target }); + } + } + collectibles + } + + fn emit_collectible( + &self, + collectible_type: TraitTypeId, + collectible: RawVc, + task_id: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, + ) { + let RawVc::TaskCell(collectible_task, cell) = collectible else { + panic!("Collectibles need to be resolved"); + }; + let cell = CellRef { + task: collectible_task, + cell, + }; + operation::UpdateCollectibleOperation::run( + task_id, + CollectibleRef { + collectible_type, + cell, + }, + 1, + self.execute_context(turbo_tasks), + ); + } + + fn unemit_collectible( + &self, + collectible_type: TraitTypeId, + collectible: RawVc, + count: u32, + task_id: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, + ) { + let RawVc::TaskCell(collectible_task, cell) = collectible else { + panic!("Collectibles need to be resolved"); + }; + let cell = CellRef { + task: collectible_task, + cell, + }; + operation::UpdateCollectibleOperation::run( + task_id, + CollectibleRef { + collectible_type, + cell, + }, + -(i32::try_from(count).unwrap()), + self.execute_context(turbo_tasks), + ); + } + fn update_task_cell( &self, task_id: TaskId, @@ -1293,7 +1442,7 @@ impl TurboTasksBackendInner { }, }); task.add(CachedDataItem::AggregateRoot { - value: RootState::new(root_type), + value: RootState::new(root_type, task_id), }); task.add(CachedDataItem::new_scheduled(move || match root_type { 
ActiveType::RootTask => "Root Task".to_string(), @@ -1474,33 +1623,36 @@ impl Backend for TurboTasksBackend { fn read_task_collectibles( &self, - _: TaskId, - _: TraitTypeId, - _: TaskId, - _: &dyn TurboTasksBackendApi, + task_id: TaskId, + collectible_type: TraitTypeId, + reader: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, ) -> AutoMap, 1> { - todo!() + self.0 + .read_task_collectibles(task_id, collectible_type, reader, turbo_tasks) } fn emit_collectible( &self, - _: TraitTypeId, - _: RawVc, - _: TaskId, - _: &dyn TurboTasksBackendApi, + collectible_type: TraitTypeId, + collectible: RawVc, + task_id: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, ) { - todo!() + self.0 + .emit_collectible(collectible_type, collectible, task_id, turbo_tasks) } fn unemit_collectible( &self, - _: TraitTypeId, - _: RawVc, - _: u32, - _: TaskId, - _: &dyn TurboTasksBackendApi, + collectible_type: TraitTypeId, + collectible: RawVc, + count: u32, + task_id: TaskId, + turbo_tasks: &dyn TurboTasksBackendApi, ) { - todo!() + self.0 + .unemit_collectible(collectible_type, collectible, count, task_id, turbo_tasks) } fn update_task_cell( diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs index c999dc28cab9d..a6b2b05664bdb 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs @@ -1,15 +1,18 @@ use std::{cmp::max, collections::VecDeque, num::NonZeroU32}; use serde::{Deserialize, Serialize}; +use smallvec::SmallVec; use turbo_tasks::TaskId; use crate::{ backend::{ - operation::{ExecuteContext, Operation, TaskGuard}, + operation::{ExecuteContext, InvalidateOperation, Operation, TaskGuard}, storage::{get, get_many, iter_many, remove, update, update_count}, TaskDataCategory, }, - data::{ActiveType, AggregationNumber, CachedDataItem, 
CachedDataItemKey, RootState}, + data::{ + ActiveType, AggregationNumber, CachedDataItem, CachedDataItemKey, CollectibleRef, RootState, + }, }; const LEAF_NUMBER: u32 = 16; @@ -85,6 +88,9 @@ pub enum AggregationUpdateJob { FindAndScheduleDirty { task_ids: Vec, }, + Invalidate { + task_ids: SmallVec<[TaskId; 4]>, + }, BalanceEdge { upper_id: TaskId, task_id: TaskId, @@ -111,13 +117,15 @@ impl AggregationUpdateJob { #[derive(Default, Serialize, Deserialize, Clone, Debug)] pub struct AggregatedDataUpdate { dirty_container_update: Option<(TaskId, i32)>, - // TODO collectibles + collectibles_update: Vec<(CollectibleRef, i32)>, } impl AggregatedDataUpdate { fn from_task(task: &mut TaskGuard<'_>) -> Self { let aggregation = get_aggregation_number(task); let mut dirty = get!(task, Dirty).is_some(); + let mut collectibles_update: Vec<_> = + get_many!(task, Collectible { collectible } => (collectible, 1)); if is_aggregating_node(aggregation) { let dirty_container_count = get!(task, AggregatedDirtyContainerCount) .copied() @@ -125,16 +133,29 @@ impl AggregatedDataUpdate { if dirty_container_count > 0 { dirty = true; } + for collectible in iter_many!(task, AggregatedCollectible { collectible } count if count > 0 => collectible) + { + collectibles_update.push((collectible, 1)); + } } if dirty { - Self::dirty_container(task.id()) + Self::new() + .dirty_container(task.id()) + .collectibles_update(collectibles_update) } else { - Self::default() + Self::new().collectibles_update(collectibles_update) } } fn invert(mut self) -> Self { - if let Some((_, value)) = self.dirty_container_update.as_mut() { + let Self { + dirty_container_update, + collectibles_update, + } = &mut self; + if let Some((_, value)) = dirty_container_update.as_mut() { + *value = -*value; + } + for (_, value) in collectibles_update.iter_mut() { *value = -*value; } self @@ -147,9 +168,17 @@ impl AggregatedDataUpdate { ) -> AggregatedDataUpdate { let Self { dirty_container_update, + collectibles_update, } = self; 
let mut result = Self::default(); if let Some((dirty_container_id, count)) = dirty_container_update { + // When a dirty container count is increased and the task is considered as active + // `AggregateRoot` we need to schedule the dirty tasks in the new dirty container + if *count > 0 && task.has_key(&CachedDataItemKey::AggregateRoot {}) { + queue.push(AggregationUpdateJob::FindAndScheduleDirty { + task_ids: vec![*dirty_container_id], + }) + } let mut added = false; let mut removed = false; update!( @@ -170,11 +199,6 @@ impl AggregatedDataUpdate { ); let mut count_update = 0; if added { - if task.has_key(&CachedDataItemKey::AggregateRoot {}) { - queue.push(AggregationUpdateJob::FindAndScheduleDirty { - task_ids: vec![*dirty_container_id], - }) - } count_update += 1; } else if removed { count_update -= 1; @@ -194,8 +218,8 @@ impl AggregatedDataUpdate { (new != 0).then_some(new) }); if let Some((_, count)) = result.dirty_container_update.as_ref() { - if let Some(root_state) = get!(task, AggregateRoot) { - if *count < 0 { + if *count < 0 { + if let Some(root_state) = get!(task, AggregateRoot) { root_state.all_clean_event.notify(usize::MAX); if matches!(root_state.ty, ActiveType::CachedActiveUntilClean) { task.remove(&CachedDataItemKey::AggregateRoot {}); @@ -204,26 +228,71 @@ impl AggregatedDataUpdate { } } } + for (collectible, count) in collectibles_update { + let mut added = false; + let mut removed = false; + update!( + task, + AggregatedCollectible { + collectible: *collectible + }, + |old: Option| { + let old = old.unwrap_or(0); + let new = old + *count; + if old <= 0 && new > 0 { + added = true; + } else if old > 0 && new <= 0 { + removed = true; + } + (new != 0).then_some(new) + } + ); + if added || removed { + let ty = collectible.collectible_type; + let dependent: SmallVec<[TaskId; 4]> = get_many!(task, CollectiblesDependent { collectible_type, task } if *collectible_type == ty => *task); + if !dependent.is_empty() { + 
queue.push(AggregationUpdateJob::Invalidate { + task_ids: dependent, + }) + } + } + if added { + result.collectibles_update.push((*collectible, 1)); + } else if removed { + result.collectibles_update.push((*collectible, -1)); + } + } result } fn is_empty(&self) -> bool { let Self { dirty_container_update, + collectibles_update, } = self; - dirty_container_update.is_none() + dirty_container_update.is_none() && collectibles_update.is_empty() } - pub fn dirty_container(task_id: TaskId) -> Self { + pub fn new() -> Self { Self { - dirty_container_update: Some((task_id, 1)), + dirty_container_update: None, + collectibles_update: Vec::new(), } } - pub fn no_longer_dirty_container(task_id: TaskId) -> Self { - Self { - dirty_container_update: Some((task_id, -1)), - } + pub fn dirty_container(mut self, task_id: TaskId) -> Self { + self.dirty_container_update = Some((task_id, 1)); + self + } + + pub fn no_longer_dirty_container(mut self, task_id: TaskId) -> Self { + self.dirty_container_update = Some((task_id, -1)); + self + } + + pub fn collectibles_update(mut self, collectibles_update: Vec<(CollectibleRef, i32)>) -> Self { + self.collectibles_update = collectibles_update; + self } } @@ -379,6 +448,9 @@ impl AggregationUpdateQueue { AggregationUpdateJob::BalanceEdge { upper_id, task_id } => { self.balance_edge(ctx, upper_id, task_id); } + AggregationUpdateJob::Invalidate { task_ids } => { + ctx.run_operation(self, |ctx| InvalidateOperation::run(task_ids, ctx)); + } } } @@ -502,7 +574,7 @@ impl AggregationUpdateQueue { if is_aggregating_node(get_aggregation_number(&task)) { if !task.has_key(&CachedDataItemKey::AggregateRoot {}) { task.insert(CachedDataItem::AggregateRoot { - value: RootState::new(ActiveType::CachedActiveUntilClean), + value: RootState::new(ActiveType::CachedActiveUntilClean, task_id), }); } let dirty_containers: Vec<_> = diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs 
b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs index 624fcd00543ad..a92e687006215 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs @@ -11,11 +11,12 @@ use crate::{ AggregationUpdateQueue, }, invalidate::make_task_dirty, - ExecuteContext, Operation, + AggregatedDataUpdate, ExecuteContext, Operation, }, + storage::update_count, TaskDataCategory, }, - data::{CachedDataItemKey, CellRef}, + data::{CachedDataItemKey, CellRef, CollectibleRef, CollectiblesRef}, }; #[derive(Serialize, Deserialize, Clone, Default)] @@ -36,8 +37,10 @@ pub enum CleanupOldEdgesOperation { #[derive(Serialize, Deserialize, Clone)] pub enum OutdatedEdge { Child(TaskId), + Collectible(CollectibleRef, i32), CellDependency(CellRef), OutputDependency(TaskId), + CollectiblesDependency(CollectiblesRef), RemovedCellDependent(TaskId), } @@ -72,21 +75,51 @@ impl Operation for CleanupOldEdgesOperation { if let Some(edge) = outdated.pop() { match edge { OutdatedEdge::Child(child_id) => { + let mut children = Vec::new(); + children.push(child_id); + outdated.retain(|e| match e { + OutdatedEdge::Child(id) => { + children.push(*id); + false + } + _ => true, + }); let mut task = ctx.task(task_id, TaskDataCategory::All); - task.remove(&CachedDataItemKey::Child { task: child_id }); + for &child_id in children.iter() { + task.remove(&CachedDataItemKey::Child { task: child_id }); + } if is_aggregating_node(get_aggregation_number(&task)) { - queue.push(AggregationUpdateJob::InnerLostFollower { + queue.push(AggregationUpdateJob::InnerLostFollowers { upper_ids: vec![task_id], - lost_follower_id: child_id, + lost_follower_ids: children, }); } else { let upper_ids = get_uppers(&task); - queue.push(AggregationUpdateJob::InnerLostFollower { + queue.push(AggregationUpdateJob::InnerLostFollowers { upper_ids, - lost_follower_id: child_id, + lost_follower_ids: 
children, }); } } + OutdatedEdge::Collectible(collectible, count) => { + let mut collectibles = Vec::new(); + collectibles.push((collectible, count)); + outdated.retain(|e| match e { + OutdatedEdge::Collectible(collectible, count) => { + collectibles.push((*collectible, -*count)); + false + } + _ => true, + }); + let mut task = ctx.task(task_id, TaskDataCategory::All); + for &(collectible, count) in collectibles.iter() { + update_count!(task, Collectible { collectible }, -count); + } + queue.extend(AggregationUpdateJob::data_update( + &mut task, + AggregatedDataUpdate::new().collectibles_update(collectibles), + )); + } OutdatedEdge::CellDependency(CellRef { task: cell_task_id, cell, @@ -122,6 +155,28 @@ impl Operation for CleanupOldEdgesOperation { }); } } + OutdatedEdge::CollectiblesDependency(CollectiblesRef { + collectible_type, + task: dependent_task_id, + }) => { + { + let mut task = + ctx.task(dependent_task_id, TaskDataCategory::Data); + task.remove(&CachedDataItemKey::CollectiblesDependent { + collectible_type, + task: task_id, + }); + } + { + let mut task = ctx.task(task_id, TaskDataCategory::Data); + task.remove(&CachedDataItemKey::CollectiblesDependency { + target: CollectiblesRef { + collectible_type, + task: dependent_task_id, + }, + }); + } + } OutdatedEdge::RemovedCellDependent(task_id) => { make_task_dirty(task_id, queue, ctx); } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs index 2872f4f80fe5c..c3d9162050f5d 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs @@ -10,10 +10,10 @@ use crate::{ }, ExecuteContext, Operation, }, - storage::get, + storage::{get, get_mut}, TaskDataCategory, }, - data::{CachedDataItem, CachedDataItemKey}, + data::{CachedDataItem, CachedDataItemKey, InProgressState}, }; #[derive(Serialize, 
Deserialize, Clone, Default)] @@ -25,7 +25,6 @@ pub enum InvalidateOperation { AggregationUpdate { queue: AggregationUpdateQueue, }, - // TODO Add to dirty tasks list #[default] Done, } @@ -77,14 +76,29 @@ pub fn make_task_dirty( let mut task = ctx.task(task_id, TaskDataCategory::All); + make_task_dirty_internal(&mut task, task_id, true, queue, ctx); +} + +pub fn make_task_dirty_internal( + task: &mut super::TaskGuard, + task_id: TaskId, + make_stale: bool, + queue: &mut AggregationUpdateQueue, + ctx: &mut ExecuteContext, +) { + if make_stale { + if let Some(InProgressState::InProgress { stale, .. }) = get_mut!(task, InProgress) { + *stale = true; + } + } if task.add(CachedDataItem::Dirty { value: () }) { let dirty_container = get!(task, AggregatedDirtyContainerCount) .copied() .unwrap_or_default(); if dirty_container == 0 { queue.extend(AggregationUpdateJob::data_update( - &mut task, - AggregatedDataUpdate::dirty_container(task_id), + task, + AggregatedDataUpdate::new().dirty_container(task_id), )); } let root = task.has_key(&CachedDataItemKey::AggregateRoot {}); diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs index 6218ab9c032f3..515b16900a933 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs @@ -3,6 +3,7 @@ mod cleanup_old_edges; mod connect_child; mod invalidate; mod update_cell; +mod update_collectible; mod update_output; use std::{ @@ -422,6 +423,10 @@ impl TaskGuard<'_> { self.task.get(key) } + pub fn get_mut(&mut self, key: &CachedDataItemKey) -> Option<&mut CachedDataItemValue> { + self.task.get_mut(key) + } + pub fn has_key(&self, key: &CachedDataItemKey) -> bool { self.task.has_key(key) } @@ -499,6 +504,7 @@ macro_rules! 
impl_operation { pub enum AnyOperation { ConnectChild(connect_child::ConnectChildOperation), Invalidate(invalidate::InvalidateOperation), + UpdateOutput(update_output::UpdateOutputOperation), CleanupOldEdges(cleanup_old_edges::CleanupOldEdgesOperation), AggregationUpdate(aggregation_update::AggregationUpdateQueue), Nested(Vec), @@ -509,6 +515,7 @@ impl AnyOperation { match self { AnyOperation::ConnectChild(op) => op.execute(ctx), AnyOperation::Invalidate(op) => op.execute(ctx), + AnyOperation::UpdateOutput(op) => op.execute(ctx), AnyOperation::CleanupOldEdges(op) => op.execute(ctx), AnyOperation::AggregationUpdate(op) => op.execute(ctx), AnyOperation::Nested(ops) => { @@ -522,6 +529,7 @@ impl AnyOperation { impl_operation!(ConnectChild connect_child::ConnectChildOperation); impl_operation!(Invalidate invalidate::InvalidateOperation); +impl_operation!(UpdateOutput update_output::UpdateOutputOperation); impl_operation!(CleanupOldEdges cleanup_old_edges::CleanupOldEdgesOperation); impl_operation!(AggregationUpdate aggregation_update::AggregationUpdateQueue); @@ -531,5 +539,5 @@ pub use self::{ }, cleanup_old_edges::OutdatedEdge, update_cell::UpdateCellOperation, - update_output::UpdateOutputOperation, + update_collectible::UpdateCollectibleOperation, }; diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs new file mode 100644 index 0000000000000..8fda9d51e10ce --- /dev/null +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs @@ -0,0 +1,56 @@ +use std::cmp::min; + +use turbo_tasks::TaskId; + +use crate::{ + backend::{ + operation::{ + AggregatedDataUpdate, AggregationUpdateJob, AggregationUpdateQueue, ExecuteContext, + Operation, + }, + storage::{get, update_count}, + TaskDataCategory, + }, + data::CollectibleRef, +}; + +pub struct UpdateCollectibleOperation; + +impl UpdateCollectibleOperation { + pub fn run( + 
task_id: TaskId, + collectible: CollectibleRef, + count: i32, + mut ctx: ExecuteContext<'_>, + ) { + let mut queue = AggregationUpdateQueue::new(); + let mut task = ctx.task(task_id, TaskDataCategory::All); + let outdated = get!(task, OutdatedCollectible { collectible }).copied(); + if let Some(outdated) = outdated { + if count > 0 && outdated > 0 { + update_count!( + task, + OutdatedCollectible { collectible }, + -min(count, outdated) + ); + } else if count < 0 && outdated < 0 { + update_count!( + task, + OutdatedCollectible { collectible }, + min(-count, -outdated) + ); + } else { + // Not reduced from outdated + } + } + update_count!(task, Collectible { collectible }, count); + queue.extend(AggregationUpdateJob::data_update( + &mut task, + AggregatedDataUpdate::new().collectibles_update(vec![(collectible, count)]), + )); + + drop(task); + + queue.execute(&mut ctx); + } +} diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_output.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_output.rs index fb12b8752bf7d..f791a11930b87 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_output.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_output.rs @@ -1,18 +1,41 @@ -use std::borrow::Cow; +use std::{borrow::Cow, mem::take}; use anyhow::{anyhow, Result}; +use serde::{Deserialize, Serialize}; use turbo_tasks::{util::SharedError, RawVc, TaskId}; use crate::{ backend::{ - operation::{ExecuteContext, InvalidateOperation}, - storage::get_many, + operation::{ + invalidate::{make_task_dirty, make_task_dirty_internal}, + AggregationUpdateQueue, ExecuteContext, Operation, + }, + storage::{get, get_many}, TaskDataCategory, }, - data::{CachedDataItem, CachedDataItemKey, CachedDataItemValue, CellRef, OutputValue}, + data::{ + CachedDataItem, CachedDataItemKey, CachedDataItemValue, CellRef, InProgressState, + OutputValue, + }, }; -pub struct UpdateOutputOperation; +#[derive(Serialize, 
Deserialize, Clone, Default)] +pub enum UpdateOutputOperation { + MakeDependentTasksDirty { + dependent_tasks: Vec, + children: Vec, + queue: AggregationUpdateQueue, + }, + EnsureUnfinishedChildrenDirty { + children: Vec, + queue: AggregationUpdateQueue, + }, + AggregationUpdate { + queue: AggregationUpdateQueue, + }, + #[default] + Done, +} impl UpdateOutputOperation { pub fn run( @@ -21,6 +44,10 @@ impl UpdateOutputOperation { mut ctx: ExecuteContext<'_>, ) { let mut task = ctx.task(task_id, TaskDataCategory::Data); + if let Some(InProgressState::InProgress { stale: true, .. }) = get!(task, InProgress) { + // Skip updating the output when the task is stale + return; + } let old_error = task.remove(&CachedDataItemKey::Error {}); let current_output = task.get(&CachedDataItemKey::Output {}); let output_value = match output { @@ -76,12 +103,69 @@ impl UpdateOutputOperation { value: output_value, }); - let dependent = get_many!(task, OutputDependent { task } _value => task); + let dependent_tasks = get_many!(task, OutputDependent { task } => task); + let children = get_many!(task, Child { task } => task); + + let mut queue = AggregationUpdateQueue::new(); + + make_task_dirty_internal(&mut task, task_id, false, &mut queue, &mut ctx); drop(task); drop(old_content); drop(old_error); - InvalidateOperation::run(dependent, ctx); + UpdateOutputOperation::MakeDependentTasksDirty { + dependent_tasks, + children, + queue, + } + .execute(&mut ctx); + } +} + +impl Operation for UpdateOutputOperation { + fn execute(mut self, ctx: &mut ExecuteContext<'_>) { + loop { + ctx.operation_suspend_point(&self); + match self { + UpdateOutputOperation::MakeDependentTasksDirty { + ref mut dependent_tasks, + ref mut children, + ref mut queue, + } => { + if let Some(dependent_task_id) = dependent_tasks.pop() { + make_task_dirty(dependent_task_id, queue, ctx); + } + if dependent_tasks.is_empty() { + self = UpdateOutputOperation::EnsureUnfinishedChildrenDirty { + children: take(children), + 
queue: take(queue), + }; + } + } + UpdateOutputOperation::EnsureUnfinishedChildrenDirty { + ref mut children, + ref mut queue, + } => { + if let Some(child_id) = children.pop() { + let mut child_task = ctx.task(child_id, TaskDataCategory::Data); + if !child_task.has_key(&CachedDataItemKey::Output {}) { + make_task_dirty_internal(&mut child_task, child_id, false, queue, ctx); + } + } + if children.is_empty() { + self = UpdateOutputOperation::AggregationUpdate { queue: take(queue) }; + } + } + UpdateOutputOperation::AggregationUpdate { ref mut queue } => { + if queue.process(ctx) { + self = UpdateOutputOperation::Done; + } + } + UpdateOutputOperation::Done => { + return; + } + } + } } } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs index 1ad5898002cb5..bf7aa5360663f 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs @@ -238,6 +238,10 @@ where self.get_map(key).and_then(|m| m.get(key)) } + pub fn get_mut(&mut self, key: &T::Key) -> Option<&mut T::Value> { + self.get_map_mut(key).get_mut(key) + } + pub fn has_key(&self, key: &T::Key) -> bool { self.get_map(key) .map(|m| m.contains_key(key)) @@ -403,8 +407,22 @@ macro_rules! get { }; } +macro_rules! get_mut { + ($task:ident, $key:ident $input:tt) => { + if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.get_mut(&$crate::data::CachedDataItemKey::$key $input).as_mut() { + let () = $crate::data::allow_mut_access::$key; + Some(value) + } else { + None + } + }; + ($task:ident, $key:ident) => { + $crate::backend::storage::get_mut!($task, $key {}) + }; +} + macro_rules! iter_many { - ($task:ident, $key:ident $input:tt => $value:ident) => { + ($task:ident, $key:ident $input:tt => $value:expr) => { $task .iter($crate::data::indicies::$key) .filter_map(|(key, _)| match *key { @@ -412,6 +430,22 @@ macro_rules! 
iter_many { _ => None, }) }; + ($task:ident, $key:ident $input:tt => $value:expr) => { + $task + .iter($crate::data::indicies::$key) + .filter_map(|(key, _)| match key { + $crate::data::CachedDataItemKey::$key $input => Some($value), + _ => None, + }) + }; + ($task:ident, $key:ident $input:tt if $cond:expr => $value:expr) => { + $task + .iter($crate::data::indicies::$key) + .filter_map(|(key, _)| match key { + $crate::data::CachedDataItemKey::$key $input if $cond => Some($value), + _ => None, + }) + }; ($task:ident, $key:ident $input:tt $value_ident:ident => $value:expr) => { $task .iter($crate::data::indicies::$key) @@ -431,9 +465,15 @@ macro_rules! iter_many { } macro_rules! get_many { - ($task:ident, $key:ident $input:tt => $value:ident) => { + ($task:ident, $key:ident $input:tt => $value:expr) => { $crate::backend::storage::iter_many!($task, $key $input => $value).collect() }; + ($task:ident, $key:ident $input:tt => $value:expr) => { + $crate::backend::storage::iter_many!($task, $key $input => $value).collect() + }; + ($task:ident, $key:ident $input:tt if $cond:expr => $value:expr) => { + $crate::backend::storage::iter_many!($task, $key $input if $cond => $value).collect() + }; ($task:ident, $key:ident $input:tt $value_ident:ident => $value:expr) => { $crate::backend::storage::iter_many!($task, $key $input $value_ident => $value).collect() }; @@ -502,6 +542,7 @@ macro_rules! 
remove { pub(crate) use get; pub(crate) use get_many; +pub(crate) use get_mut; pub(crate) use iter_many; pub(crate) use remove; pub(crate) use update; diff --git a/turbopack/crates/turbo-tasks-backend/src/data.rs b/turbopack/crates/turbo-tasks-backend/src/data.rs index 4500d7037e1a4..ea42f2ed1a3dd 100644 --- a/turbopack/crates/turbo-tasks-backend/src/data.rs +++ b/turbopack/crates/turbo-tasks-backend/src/data.rs @@ -3,7 +3,7 @@ use turbo_tasks::{ event::{Event, EventListener}, registry, util::SharedError, - CellId, KeyValuePair, TaskId, TypedSharedReference, ValueTypeId, + CellId, KeyValuePair, TaskId, TraitTypeId, TypedSharedReference, ValueTypeId, }; use crate::backend::{indexed::Indexed, TaskDataCategory}; @@ -34,10 +34,16 @@ pub struct CellRef { pub cell: CellId, } +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub struct CollectibleRef { + pub collectible_type: TraitTypeId, + pub cell: CellRef, +} + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] pub struct CollectiblesRef { pub task: TaskId, - pub collectible_type: ValueTypeId, + pub collectible_type: TraitTypeId, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -65,10 +71,10 @@ pub struct RootState { } impl RootState { - pub fn new(ty: ActiveType) -> Self { + pub fn new(ty: ActiveType, id: TaskId) -> Self { Self { ty, - all_clean_event: Event::new(|| "RootState::all_clean_event".to_string()), + all_clean_event: Event::new(move || format!("RootState::all_clean_event {:?}", id)), } } } @@ -137,8 +143,8 @@ pub enum CachedDataItem { value: OutputValue, }, Collectible { - collectible: CellRef, - value: (), + collectible: CollectibleRef, + value: i32, }, // State @@ -190,7 +196,7 @@ pub enum CachedDataItem { value: (), }, CollectiblesDependent { - collectibles_type: ValueTypeId, + collectible_type: TraitTypeId, task: TaskId, value: (), }, @@ -214,7 +220,7 @@ pub enum CachedDataItem { value: i32, }, AggregatedCollectible { - 
collectible: CellRef, + collectible: CollectibleRef, value: i32, }, AggregatedDirtyContainerCount { @@ -237,18 +243,27 @@ pub enum CachedDataItem { cell: CellId, value: InProgressCellState, }, + #[serde(skip)] OutdatedCollectible { - collectible: CellRef, - value: (), + collectible: CollectibleRef, + value: i32, }, + #[serde(skip)] OutdatedOutputDependency { target: TaskId, value: (), }, + #[serde(skip)] OutdatedCellDependency { target: CellRef, value: (), }, + #[serde(skip)] + OutdatedCollectiblesDependency { + target: CollectiblesRef, + value: (), + }, + #[serde(skip)] OutdatedChild { task: TaskId, value: (), @@ -265,7 +280,9 @@ impl CachedDataItem { pub fn is_persistent(&self) -> bool { match self { CachedDataItem::Output { value } => value.is_transient(), - CachedDataItem::Collectible { collectible, .. } => !collectible.task.is_transient(), + CachedDataItem::Collectible { collectible, .. } => { + !collectible.cell.task.is_transient() + } CachedDataItem::Dirty { .. } => true, CachedDataItem::DirtyWhenPersisted { .. } => true, CachedDataItem::Child { task, .. } => !task.is_transient(), @@ -282,7 +299,7 @@ impl CachedDataItem { CachedDataItem::Upper { task, .. } => !task.is_transient(), CachedDataItem::AggregatedDirtyContainer { task, .. } => !task.is_transient(), CachedDataItem::AggregatedCollectible { collectible, .. } => { - !collectible.task.is_transient() + !collectible.cell.task.is_transient() } CachedDataItem::AggregatedDirtyContainerCount { .. } => true, CachedDataItem::AggregateRoot { .. } => false, @@ -291,6 +308,7 @@ impl CachedDataItem { CachedDataItem::OutdatedCollectible { .. } => false, CachedDataItem::OutdatedOutputDependency { .. } => false, CachedDataItem::OutdatedCellDependency { .. } => false, + CachedDataItem::OutdatedCollectiblesDependency { .. } => false, CachedDataItem::OutdatedChild { .. } => false, CachedDataItem::Error { .. 
} => false, } @@ -327,7 +345,9 @@ impl CachedDataItemKey { pub fn is_persistent(&self) -> bool { match self { CachedDataItemKey::Output { .. } => true, - CachedDataItemKey::Collectible { collectible, .. } => !collectible.task.is_transient(), + CachedDataItemKey::Collectible { collectible, .. } => { + !collectible.cell.task.is_transient() + } CachedDataItemKey::Dirty { .. } => true, CachedDataItemKey::DirtyWhenPersisted { .. } => true, CachedDataItemKey::Child { task, .. } => !task.is_transient(), @@ -344,7 +364,7 @@ impl CachedDataItemKey { CachedDataItemKey::Upper { task, .. } => !task.is_transient(), CachedDataItemKey::AggregatedDirtyContainer { task, .. } => !task.is_transient(), CachedDataItemKey::AggregatedCollectible { collectible, .. } => { - !collectible.task.is_transient() + !collectible.cell.task.is_transient() } CachedDataItemKey::AggregatedDirtyContainerCount { .. } => true, CachedDataItemKey::AggregateRoot { .. } => false, @@ -353,6 +373,7 @@ impl CachedDataItemKey { CachedDataItemKey::OutdatedCollectible { .. } => false, CachedDataItemKey::OutdatedOutputDependency { .. } => false, CachedDataItemKey::OutdatedCellDependency { .. } => false, + CachedDataItemKey::OutdatedCollectiblesDependency { .. } => false, CachedDataItemKey::OutdatedChild { .. } => false, CachedDataItemKey::Error { .. } => false, } @@ -376,6 +397,7 @@ impl CachedDataItemKey { | CachedDataItemKey::OutdatedCollectible { .. } | CachedDataItemKey::OutdatedOutputDependency { .. } | CachedDataItemKey::OutdatedCellDependency { .. } + | CachedDataItemKey::OutdatedCollectiblesDependency { .. } | CachedDataItemKey::OutdatedChild { .. } | CachedDataItemKey::Error { .. } => TaskDataCategory::Data, @@ -392,16 +414,27 @@ impl CachedDataItemKey { } } +/// Used by the [`get_mut`][crate::backend::storage::get_mut] macro to restrict mutable access to a +/// subset of types. No mutable access should be allowed for persisted data, since that would break +/// persisting. 
+#[allow(non_upper_case_globals, dead_code)] +pub mod allow_mut_access { + pub const InProgress: () = (); +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum CachedDataItemIndex { Children, + Collectibles, Follower, Upper, AggregatedDirtyContainer, + AggregatedCollectible, CellData, CellTypeMaxIndex, CellDependent, OutputDependent, + CollectiblesDependent, Dependencies, } @@ -411,18 +444,29 @@ pub mod indicies { pub const Child: CachedDataItemIndex = CachedDataItemIndex::Children; pub const OutdatedChild: CachedDataItemIndex = CachedDataItemIndex::Children; + pub const Collectible: CachedDataItemIndex = CachedDataItemIndex::Collectibles; + pub const OutdatedCollectible: CachedDataItemIndex = CachedDataItemIndex::Collectibles; pub const Follower: CachedDataItemIndex = CachedDataItemIndex::Follower; pub const Upper: CachedDataItemIndex = CachedDataItemIndex::Upper; pub const AggregatedDirtyContainer: CachedDataItemIndex = CachedDataItemIndex::AggregatedDirtyContainer; + pub const AggregatedCollectible: CachedDataItemIndex = + CachedDataItemIndex::AggregatedCollectible; pub const CellData: CachedDataItemIndex = CachedDataItemIndex::CellData; pub const CellTypeMaxIndex: CachedDataItemIndex = CachedDataItemIndex::CellTypeMaxIndex; pub const CellDependent: CachedDataItemIndex = CachedDataItemIndex::CellDependent; pub const OutputDependent: CachedDataItemIndex = CachedDataItemIndex::OutputDependent; + pub const CollectiblesDependent: CachedDataItemIndex = + CachedDataItemIndex::CollectiblesDependent; pub const OutputDependency: CachedDataItemIndex = CachedDataItemIndex::Dependencies; pub const CellDependency: CachedDataItemIndex = CachedDataItemIndex::Dependencies; + pub const CollectibleDependency: CachedDataItemIndex = CachedDataItemIndex::Dependencies; pub const OutdatedOutputDependency: CachedDataItemIndex = CachedDataItemIndex::Dependencies; pub const OutdatedCellDependency: CachedDataItemIndex = CachedDataItemIndex::Dependencies; + pub const 
OutdatedCollectiblesDependency: CachedDataItemIndex = + CachedDataItemIndex::Dependencies; + pub const OutdatedCollectibleDependency: CachedDataItemIndex = + CachedDataItemIndex::Dependencies; } impl Indexed for CachedDataItemKey { @@ -432,11 +476,18 @@ impl Indexed for CachedDataItemKey { match self { CachedDataItemKey::Child { .. } => Some(CachedDataItemIndex::Children), CachedDataItemKey::OutdatedChild { .. } => Some(CachedDataItemIndex::Children), + CachedDataItemKey::Collectible { .. } => Some(CachedDataItemIndex::Collectibles), + CachedDataItemKey::OutdatedCollectible { .. } => { + Some(CachedDataItemIndex::Collectibles) + } CachedDataItemKey::Follower { .. } => Some(CachedDataItemIndex::Follower), CachedDataItemKey::Upper { .. } => Some(CachedDataItemIndex::Upper), CachedDataItemKey::AggregatedDirtyContainer { .. } => { Some(CachedDataItemIndex::AggregatedDirtyContainer) } + CachedDataItemKey::AggregatedCollectible { .. } => { + Some(CachedDataItemIndex::AggregatedCollectible) + } CachedDataItemKey::CellData { .. } => Some(CachedDataItemIndex::CellData), CachedDataItemKey::CellTypeMaxIndex { .. } => { Some(CachedDataItemIndex::CellTypeMaxIndex) @@ -445,12 +496,18 @@ impl Indexed for CachedDataItemKey { CachedDataItemKey::OutputDependent { .. } => Some(CachedDataItemIndex::OutputDependent), CachedDataItemKey::OutputDependency { .. } => Some(CachedDataItemIndex::Dependencies), CachedDataItemKey::CellDependency { .. } => Some(CachedDataItemIndex::Dependencies), + CachedDataItemKey::CollectiblesDependency { .. } => { + Some(CachedDataItemIndex::Dependencies) + } CachedDataItemKey::OutdatedOutputDependency { .. } => { Some(CachedDataItemIndex::Dependencies) } CachedDataItemKey::OutdatedCellDependency { .. } => { Some(CachedDataItemIndex::Dependencies) } + CachedDataItemKey::OutdatedCollectiblesDependency { .. 
} => { + Some(CachedDataItemIndex::Dependencies) + } _ => None, } } diff --git a/turbopack/crates/turbo-tasks-backend/tests/collectibles.rs b/turbopack/crates/turbo-tasks-backend/tests/collectibles.rs new file mode 120000 index 0000000000000..7de5bc7d80499 --- /dev/null +++ b/turbopack/crates/turbo-tasks-backend/tests/collectibles.rs @@ -0,0 +1 @@ +../../turbo-tasks-testing/tests/collectibles.rs \ No newline at end of file diff --git a/turbopack/crates/turbo-tasks-backend/tests/dirty_in_progress.rs b/turbopack/crates/turbo-tasks-backend/tests/dirty_in_progress.rs new file mode 120000 index 0000000000000..0a45daf6b8443 --- /dev/null +++ b/turbopack/crates/turbo-tasks-backend/tests/dirty_in_progress.rs @@ -0,0 +1 @@ +../../turbo-tasks-testing/tests/dirty_in_progress.rs \ No newline at end of file diff --git a/turbopack/crates/turbo-tasks-backend/tests/recompute_collectibles.rs b/turbopack/crates/turbo-tasks-backend/tests/recompute_collectibles.rs new file mode 120000 index 0000000000000..664d8d48d1408 --- /dev/null +++ b/turbopack/crates/turbo-tasks-backend/tests/recompute_collectibles.rs @@ -0,0 +1 @@ +../../turbo-tasks-testing/tests/recompute_collectibles.rs \ No newline at end of file diff --git a/turbopack/crates/turbo-tasks-testing/tests/dirty_in_progress.rs b/turbopack/crates/turbo-tasks-testing/tests/dirty_in_progress.rs index c10bc18a27acf..d3c17eba0dbf4 100644 --- a/turbopack/crates/turbo-tasks-testing/tests/dirty_in_progress.rs +++ b/turbopack/crates/turbo-tasks-testing/tests/dirty_in_progress.rs @@ -38,6 +38,7 @@ async fn dirty_in_progress() { let read = output.strongly_consistent().await?; assert_eq!(read.value, value); assert_eq!(read.collectible, collectible); + println!("\n"); } anyhow::Ok(()) }) From d9a9a5d6ecf097c3658be68fbc6e00ba7fdff7dc Mon Sep 17 00:00:00 2001 From: Jiachi Liu Date: Thu, 10 Oct 2024 17:51:05 +0200 Subject: [PATCH 2/8] codemod: do not await on invalid prop (#71089) --- .../access-props-32.input.tsx | 6 ++++++ 
.../access-props-32.output.tsx | 6 ++++++ .../lib/async-request-api/next-async-dynamic-prop.ts | 9 +++++++++ 3 files changed, 21 insertions(+) create mode 100644 packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.input.tsx create mode 100644 packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.output.tsx diff --git a/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.input.tsx b/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.input.tsx new file mode 100644 index 0000000000000..bed7e00cf5623 --- /dev/null +++ b/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.input.tsx @@ -0,0 +1,6 @@ +export async function GET( + req: NextRequest, + ctx: any, +): Promise { + callback(ctx.propDoesNotExist); +} diff --git a/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.output.tsx b/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.output.tsx new file mode 100644 index 0000000000000..bed7e00cf5623 --- /dev/null +++ b/packages/next-codemod/transforms/__testfixtures__/next-async-request-api-dynamic-props/access-props-32.output.tsx @@ -0,0 +1,6 @@ +export async function GET( + req: NextRequest, + ctx: any, +): Promise { + callback(ctx.propDoesNotExist); +} diff --git a/packages/next-codemod/transforms/lib/async-request-api/next-async-dynamic-prop.ts b/packages/next-codemod/transforms/lib/async-request-api/next-async-dynamic-prop.ts index df62c81754629..5bb61d7bbf174 100644 --- a/packages/next-codemod/transforms/lib/async-request-api/next-async-dynamic-prop.ts +++ b/packages/next-codemod/transforms/lib/async-request-api/next-async-dynamic-prop.ts @@ -51,6 +51,15 @@ function awaitMemberAccessOfProp( 
memberAccess.forEach((memberAccessPath) => { const member = memberAccessPath.value + const memberProperty = member.property + const isAccessingMatchedProperty = + j.Identifier.check(memberProperty) && + TARGET_PROP_NAMES.has(memberProperty.name) + + if (!isAccessingMatchedProperty) { + return + } + if (isParentPromiseAllCallExpression(memberAccessPath, j)) { return } From 61f5b95cf6f63c7092992a24eb576a1bd46e822d Mon Sep 17 00:00:00 2001 From: Zack Tanner <1939140+ztanner@users.noreply.github.com> Date: Thu, 10 Oct 2024 08:52:08 -0700 Subject: [PATCH 3/8] docs: misc typo corrections in upgrade guide & codemod (#71098) Caught these when reviewing the published changes. --- .../11-upgrading/01-codemods.mdx | 6 +++--- .../11-upgrading/02-version-15.mdx | 2 +- errors/sync-dynamic-apis.mdx | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx b/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx index 9a53583b80b57..f92b18452438b 100644 --- a/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx +++ b/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx @@ -52,7 +52,7 @@ export const runtime = 'edge' #### Migrate to async Dynamic APIs -APIs that opted into dynamic rendering that previously supported synchronous access are now asynchronous. You can read more about this breaking change in the [upgrade guide](/docs/app/building-your-application/upgrading/version-15) +APIs that opted into dynamic rendering that previously supported synchronous access are now asynchronous. You can read more about this breaking change in the [upgrade guide](/docs/app/building-your-application/upgrading/version-15). 
##### `next-async-request-api` @@ -117,7 +117,7 @@ export default function Page({ searchParams, }: { params: { slug: string } - searchParams: { [key: string]: string | undefined } + searchParams: { [key: string]: string | string[] | undefined } }) { const { value } = searchParams if (value === 'foo') { @@ -138,7 +138,7 @@ Transforms into: // page.tsx export default function Page(props: { params: { slug: string } - searchParams: { [key: string]: string | undefined } + searchParams: { [key: string]: string | string[] | undefined } }) { const { value } = await props.searchParams if (value === 'foo') { diff --git a/docs/02-app/01-building-your-application/11-upgrading/02-version-15.mdx b/docs/02-app/01-building-your-application/11-upgrading/02-version-15.mdx index a48dd71084e15..971dd2d287782 100644 --- a/docs/02-app/01-building-your-application/11-upgrading/02-version-15.mdx +++ b/docs/02-app/01-building-your-application/11-upgrading/02-version-15.mdx @@ -205,7 +205,7 @@ export default async function Layout({ children: React.ReactNode params: Params }) { - const { slug } = await props + const { slug } = await params } ``` diff --git a/errors/sync-dynamic-apis.mdx b/errors/sync-dynamic-apis.mdx index b87338e414aa1..bc8cad0f83a34 100644 --- a/errors/sync-dynamic-apis.mdx +++ b/errors/sync-dynamic-apis.mdx @@ -12,7 +12,7 @@ Dynamic APIs are: - The `params` and `searchParams` props that get provided to pages, layouts, metadata APIs, and route handlers. - `cookies()`, `draftMode()`, and `headers()` from `next/headers` -In Next 15, these APIs have been made asynchronous. You can read more about this in the Next.js 15 [Upgrade Guide](/docs/app/building-your-application/upgrading/version-15) +In Next 15, these APIs have been made asynchronous. You can read more about this in the Next.js 15 [Upgrade Guide](/docs/app/building-your-application/upgrading/version-15). 
For example, the following code will issue a warning: From f15a3f48ad10bc45704ee08540a0bb125bff3fd5 Mon Sep 17 00:00:00 2001 From: Jiwon Choi Date: Fri, 11 Oct 2024 02:11:27 +0900 Subject: [PATCH 4/8] chore(next-codemod): move app-dir-runtime-config-experimental-edge to 13.1.2 (#71081) ### Why? Although the breaking change for `export runtime = 'experimental-edge'` for App Router was recent at https://github.com/vercel/next.js/pull/70480, we did move from experimental to `edge` at https://github.com/vercel/next.js/pull/44045, and added log at https://github.com/vercel/next.js/pull/44563 (release [v13.1.2](https://github.com/vercel/next.js/releases/tag/v13.1.2)) Therefore we lower the target codemod version to 13.1.2. --------- Co-authored-by: Jiachi Liu --- .../11-upgrading/01-codemods.mdx | 50 ++++++++++--------- packages/next-codemod/lib/utils.ts | 12 ++--- 2 files changed, 32 insertions(+), 30 deletions(-) diff --git a/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx b/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx index f92b18452438b..3a80222447eab 100644 --- a/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx +++ b/docs/02-app/01-building-your-application/11-upgrading/01-codemods.mdx @@ -26,30 +26,6 @@ Replacing `` and `` with appropriate values. ### 15.0 -#### Transform App Router Route Segment Config `runtime` value from `experimental-edge` to `edge` - -##### `app-dir-runtime-config-experimental-edge` - -> **Note**: This codemod is App Router specific. - -```bash filename="Terminal" -npx @next/codemod@latest app-dir-runtime-config-experimental-edge . -``` - -This codemod transforms [Route Segment Config `runtime`](https://nextjs.org/docs/app/api-reference/file-conventions/route-segment-config#runtime) value `experimental-edge` to `edge`. 
- -For example: - -```ts -export const runtime = 'experimental-edge' -``` - -Transforms into: - -```ts -export const runtime = 'edge' -``` - #### Migrate to async Dynamic APIs APIs that opted into dynamic rendering that previously supported synchronous access are now asynchronous. You can read more about this breaking change in the [upgrade guide](/docs/app/building-your-application/upgrading/version-15). @@ -311,6 +287,32 @@ Transforms into: import { Inter } from 'next/font/google' ``` +### 13.1.2 + +#### Transform App Router Route Segment Config `runtime` value from `experimental-edge` to `edge` + +##### `app-dir-runtime-config-experimental-edge` + +> **Note**: This codemod is App Router specific. + +```bash filename="Terminal" +npx @next/codemod@latest app-dir-runtime-config-experimental-edge . +``` + +This codemod transforms [Route Segment Config `runtime`](https://nextjs.org/docs/app/api-reference/file-conventions/route-segment-config#runtime) value `experimental-edge` to `edge`. + +For example: + +```ts +export const runtime = 'experimental-edge' +``` + +Transforms into: + +```ts +export const runtime = 'edge' +``` + ### 13.0 #### Rename Next Image Imports diff --git a/packages/next-codemod/lib/utils.ts b/packages/next-codemod/lib/utils.ts index c895e5315dd0d..a7a28a1ce3d63 100644 --- a/packages/next-codemod/lib/utils.ts +++ b/packages/next-codemod/lib/utils.ts @@ -82,6 +82,12 @@ export const TRANSFORMER_INQUIRER_CHOICES = [ value: 'next-image-to-legacy-image', version: '13.0', }, + { + title: + 'Transform App Router Route Segment Config `runtime` value from `experimental-edge` to `edge`', + value: 'app-dir-runtime-config-experimental-edge', + version: '13.1.2', + }, { title: 'Uninstall `@next/font` and transform imports to `next/font`', value: 'built-in-next-font', @@ -116,10 +122,4 @@ export const TRANSFORMER_INQUIRER_CHOICES = [ value: 'next-async-request-api', version: '15.0.0-canary.171', }, - { - title: - 'Transform App Router Route Segment Config 
`runtime` value from `experimental-edge` to `edge`', - value: 'app-dir-runtime-config-experimental-edge', - version: '15.0.0-canary.179', - }, ] From 90ecf79c3654e3f713c662da7f932ba05187574b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Markb=C3=A5ge?= Date: Thu, 10 Oct 2024 13:25:23 -0400 Subject: [PATCH 5/8] Add cacheLife() / cacheTag() APIs (#71064) Collect the revalidate time and tags, then propagate it upwards and store on the cache. --- packages/next/cache.js | 2 + .../work-unit-async-storage.external.ts | 3 +- .../next/src/server/use-cache/cache-life.ts | 143 ++++++++++++++ .../next/src/server/use-cache/cache-tag.ts | 25 +++ .../src/server/use-cache/use-cache-wrapper.ts | 184 +++++++++++++----- 5 files changed, 307 insertions(+), 50 deletions(-) create mode 100644 packages/next/src/server/use-cache/cache-life.ts create mode 100644 packages/next/src/server/use-cache/cache-tag.ts diff --git a/packages/next/cache.js b/packages/next/cache.js index 091bdc46b7a35..15bb9c978464c 100644 --- a/packages/next/cache.js +++ b/packages/next/cache.js @@ -8,6 +8,8 @@ const cacheExports = { unstable_noStore: require('next/dist/server/web/spec-extension/unstable-no-store') .unstable_noStore, + unstable_cacheLife: require('next/dist/server/use-cache/cache-life'), + unstable_cacheTag: require('next/dist/server/use-cache/cache-tag'), } // https://nodejs.org/api/esm.html#commonjs-namespaces diff --git a/packages/next/src/server/app-render/work-unit-async-storage.external.ts b/packages/next/src/server/app-render/work-unit-async-storage.external.ts index 2ffee7c093e14..5760ec5c708ff 100644 --- a/packages/next/src/server/app-render/work-unit-async-storage.external.ts +++ b/packages/next/src/server/app-render/work-unit-async-storage.external.ts @@ -106,7 +106,8 @@ export type PrerenderStore = export type UseCacheStore = { type: 'cache' // Collected revalidate times and tags for this cache entry during the cache render. - revalidate: number // in seconds. 
INFINITE_CACHE and higher means never revalidate. + revalidate: number // implicit revalidate time from inner caches / fetches + explicitRevalidate: undefined | number // explicit revalidate time from cacheLife() calls tags: null | string[] } diff --git a/packages/next/src/server/use-cache/cache-life.ts b/packages/next/src/server/use-cache/cache-life.ts new file mode 100644 index 0000000000000..909f8c449ebbb --- /dev/null +++ b/packages/next/src/server/use-cache/cache-life.ts @@ -0,0 +1,143 @@ +import { workUnitAsyncStorage } from '../app-render/work-unit-async-storage.external' + +export type CacheLife = { + // How long the client can cache a value without checking with the server. + stale?: number + // How frequently you want the cache to refresh on the server. + // Stale values may be served while revalidating. + revalidate?: number + // In the worst case scenario, where you haven't had traffic in a while, + // how stale can a value be until you prefer deopting to dynamic. + // Must be longer than revalidate. + expire?: number +} +// The equivalent header is kind of like: +// Cache-Control: max-age=[stale],s-max-age=[revalidate],stale-while-revalidate=[expire-revalidate],stale-if-error=[expire-revalidate] +// Except that stale-while-revalidate/stale-if-error only applies to shared caches - not private caches. + +const cacheLifeProfileMap: Map = new Map() + +// The default revalidates relatively frequently but doesn't expire to ensure it's always +// able to serve fast results but by default doesn't hang. + +export const defaultCacheLife = { + stale: Number(process.env.__NEXT_CLIENT_ROUTER_STATIC_STALETIME), + revalidate: 15 * 60, // Note: This is a new take on the defaults. 
+ expire: Infinity, +} + +cacheLifeProfileMap.set('default', defaultCacheLife) + +type CacheLifeProfiles = 'default' // TODO: Generate from the config + +function validateCacheLife(profile: CacheLife) { + if (profile.stale !== undefined) { + if ((profile.stale as any) === false) { + throw new Error( + 'Pass `Infinity` instead of `false` if you want to cache on the client forever ' + + 'without checking with the server.' + ) + } else if (typeof profile.stale !== 'number') { + throw new Error('The stale option must be a number of seconds.') + } + } + if (profile.revalidate !== undefined) { + if ((profile.revalidate as any) === false) { + throw new Error( + 'Pass `Infinity` instead of `false` if you do not want to revalidate by time.' + ) + } else if (typeof profile.revalidate !== 'number') { + throw new Error('The revalidate option must be a number of seconds.') + } + } + if (profile.expire !== undefined) { + if ((profile.expire as any) === false) { + throw new Error( + 'Pass `Infinity` instead of `false` if you want to cache on the client forever ' + + 'without checking with the server.' + ) + } else if (typeof profile.expire !== 'number') { + throw new Error('The expire option must be a number of seconds.') + } + } + + if (profile.revalidate !== undefined && profile.expire !== undefined) { + if (profile.revalidate > profile.expire) { + throw new Error( + 'If providing both the revalidate and expire options, ' + + 'the expire option must be greater than the revalidate option.' + + 'The expire option indicates how many seconds from the start ' + + 'until it can no longer be used.' + ) + } + } + + if (profile.stale !== undefined && profile.expire !== undefined) { + if (profile.stale > profile.expire) { + throw new Error( + 'If providing both the stale and expire options, ' + + 'the expire option must be greater than the stale option.' + + 'The expire option indicates how many seconds from the start ' + + 'until it can no longer be used.' 
+ ) + } + } +} + +export function cacheLife(profile: CacheLifeProfiles | CacheLife): void { + if (!process.env.__NEXT_DYNAMIC_IO) { + throw new Error( + 'cacheLife() is only available with the experimental.dynamicIO config.' + ) + } + + const workUnitStore = workUnitAsyncStorage.getStore() + if (!workUnitStore || workUnitStore.type !== 'cache') { + throw new Error( + 'cacheLife() can only be called inside a "use cache" function.' + ) + } + + if (typeof profile === 'string') { + const configuredProfile = cacheLifeProfileMap.get(profile) + if (configuredProfile === undefined) { + if (cacheLifeProfileMap.has(profile.trim())) { + throw new Error( + `Unknown cacheLife profile "${profile}" is not configured in next.config.js\n` + + `Did you mean "${profile.trim()}" without the spaces?` + ) + } + throw new Error( + `Unknown cacheLife profile "${profile}" is not configured in next.config.js\n` + + 'module.exports = {\n' + + ' experimental: {\n' + + ' cacheLife: {\n' + + ` "${profile}": ...\n` + + ' }\n' + + ' }\n' + + '}' + ) + } + profile = configuredProfile + } else if ( + typeof profile !== 'object' || + profile === null || + Array.isArray(profile) + ) { + throw new Error( + 'Invalid cacheLife() option. Either pass a profile name or object.' + ) + } else { + validateCacheLife(profile) + } + + if (profile.revalidate !== undefined) { + // Track the explicit revalidate time. 
+ if ( + workUnitStore.explicitRevalidate === undefined || + workUnitStore.explicitRevalidate > profile.revalidate + ) { + workUnitStore.explicitRevalidate = profile.revalidate + } + } +} diff --git a/packages/next/src/server/use-cache/cache-tag.ts b/packages/next/src/server/use-cache/cache-tag.ts new file mode 100644 index 0000000000000..25f105ea56b9c --- /dev/null +++ b/packages/next/src/server/use-cache/cache-tag.ts @@ -0,0 +1,25 @@ +import { workUnitAsyncStorage } from '../app-render/work-unit-async-storage.external' +import { validateTags } from '../lib/patch-fetch' + +export function cacheTag(...tags: string[]): void { + if (!process.env.__NEXT_DYNAMIC_IO) { + throw new Error( + 'cacheTag() is only available with the experimental.dynamicIO config.' + ) + } + + const workUnitStore = workUnitAsyncStorage.getStore() + if (!workUnitStore || workUnitStore.type !== 'cache') { + throw new Error( + 'cacheTag() can only be called inside a "use cache" function.' + ) + } + + const validTags = validateTags(tags, 'cacheTag()') + + if (!workUnitStore.tags) { + workUnitStore.tags = validTags + } else { + workUnitStore.tags.push(...validTags) + } +} diff --git a/packages/next/src/server/use-cache/use-cache-wrapper.ts b/packages/next/src/server/use-cache/use-cache-wrapper.ts index cf615270c750e..d0dbcd0de204e 100644 --- a/packages/next/src/server/use-cache/use-cache-wrapper.ts +++ b/packages/next/src/server/use-cache/use-cache-wrapper.ts @@ -14,7 +14,10 @@ import { import type { WorkStore } from '../app-render/work-async-storage.external' import { workAsyncStorage } from '../app-render/work-async-storage.external' -import type { UseCacheStore } from '../app-render/work-unit-async-storage.external' +import type { + UseCacheStore, + WorkUnitStore, +} from '../app-render/work-unit-async-storage.external' import { workUnitAsyncStorage } from '../app-render/work-unit-async-storage.external' import { runInCleanSnapshot } from '../app-render/clean-async-snapshot.external' @@ -24,7 
+27,7 @@ import { getClientReferenceManifestSingleton, getServerModuleMap, } from '../app-render/encryption-utils' -import { INFINITE_CACHE } from '../../lib/constants' +import { defaultCacheLife } from './cache-life' const isEdgeRuntime = process.env.NEXT_RUNTIME === 'edge' @@ -36,28 +39,32 @@ type CacheEntry = { // we also don't want to reuse a stale entry for too long so stale entries // should be considered expired/missing in such CacheHandlers. stale: boolean + tags: string[] + revalidate: number } interface CacheHandler { get(cacheKey: string | ArrayBuffer): Promise - set(cacheKey: string | ArrayBuffer, value: ReadableStream): Promise + set(cacheKey: string | ArrayBuffer, value: Promise): Promise } const cacheHandlerMap: Map = new Map() // TODO: Move default implementation to be injectable. -const defaultCacheStorage: Map = new Map() +const defaultCacheStorage: Map = new Map() cacheHandlerMap.set('default', { - async get(cacheKey: string | ArrayBuffer) { + async get(cacheKey: string | ArrayBuffer): Promise { // TODO: Implement proper caching. if (typeof cacheKey === 'string') { - const value = defaultCacheStorage.get(cacheKey) - if (value !== undefined) { - const [returnStream, newSaved] = value.tee() - defaultCacheStorage.set(cacheKey, newSaved) + const entry = defaultCacheStorage.get(cacheKey) + if (entry !== undefined) { + const [returnStream, newSaved] = entry.value.tee() + entry.value = newSaved return { value: returnStream, stale: false, + revalidate: entry.revalidate, + tags: entry.tags, } } } else { @@ -65,25 +72,27 @@ cacheHandlerMap.set('default', { } return undefined }, - async set(cacheKey: string | ArrayBuffer, value: ReadableStream) { + async set(cacheKey: string | ArrayBuffer, promise: Promise) { + const entry = await promise // TODO: Implement proper caching. if (typeof cacheKey === 'string') { - defaultCacheStorage.set(cacheKey, value) + defaultCacheStorage.set(cacheKey, entry) } else { // TODO: Handle binary keys. 
- await value.cancel() + await entry.value.cancel() } }, }) function generateCacheEntry( workStore: WorkStore, + outerWorkUnitStore: WorkUnitStore | undefined, clientReferenceManifest: DeepReadonly, cacheHandler: CacheHandler, serializedCacheKey: string | ArrayBuffer, encodedArguments: FormData | string, fn: any -): Promise { +): Promise { // We need to run this inside a clean AsyncLocalStorage snapshot so that the cache // generation cannot read anything from the context we're currently executing which // might include request specific things like cookies() inside a React.cache(). @@ -92,6 +101,7 @@ function generateCacheEntry( return runInCleanSnapshot( generateCacheEntryWithRestoredWorkStore, workStore, + outerWorkUnitStore, clientReferenceManifest, cacheHandler, serializedCacheKey, @@ -102,6 +112,7 @@ function generateCacheEntry( function generateCacheEntryWithRestoredWorkStore( workStore: WorkStore, + outerWorkUnitStore: WorkUnitStore | undefined, clientReferenceManifest: DeepReadonly, cacheHandler: CacheHandler, serializedCacheKey: string | ArrayBuffer, @@ -119,6 +130,7 @@ function generateCacheEntryWithRestoredWorkStore( workStore, generateCacheEntryWithCacheContext, workStore, + outerWorkUnitStore, clientReferenceManifest, cacheHandler, serializedCacheKey, @@ -129,6 +141,7 @@ function generateCacheEntryWithRestoredWorkStore( function generateCacheEntryWithCacheContext( workStore: WorkStore, + outerWorkUnitStore: WorkUnitStore | undefined, clientReferenceManifest: DeepReadonly, cacheHandler: CacheHandler, serializedCacheKey: string | ArrayBuffer, @@ -138,13 +151,16 @@ function generateCacheEntryWithCacheContext( // Initialize the Store for this Cache entry. 
const cacheStore: UseCacheStore = { type: 'cache', - revalidate: INFINITE_CACHE, + revalidate: defaultCacheLife.revalidate, + explicitRevalidate: undefined, tags: null, } return workUnitAsyncStorage.run( cacheStore, generateCacheEntryImpl, workStore, + outerWorkUnitStore, + cacheStore, clientReferenceManifest, cacheHandler, serializedCacheKey, @@ -153,8 +169,95 @@ function generateCacheEntryWithCacheContext( ) } +function propagateCacheLifeAndTags( + workUnitStore: WorkUnitStore | undefined, + entry: CacheEntry +): void { + if ( + workUnitStore && + (workUnitStore.type === 'cache' || + workUnitStore.type === 'prerender' || + workUnitStore.type === 'prerender-ppr' || + workUnitStore.type === 'prerender-legacy') + ) { + // Propagate tags and revalidate upwards + const outerTags = workUnitStore.tags ?? (workUnitStore.tags = []) + const entryTags = entry.tags + for (let i = 0; i < entryTags.length; i++) { + const tag = entryTags[i] + if (!outerTags.includes(tag)) { + outerTags.push(tag) + } + } + if (workUnitStore.revalidate > entry.revalidate) { + workUnitStore.revalidate = entry.revalidate + } + } +} + +async function collectResult( + savedStream: ReadableStream, + outerWorkUnitStore: WorkUnitStore | undefined, + innerCacheStore: UseCacheStore, + errors: Array // This is a live array that gets pushed into. +): Promise { + // We create a buffered stream that collects all chunks until the end to + // ensure that RSC has finished rendering and therefore we have collected + // all tags. In the future the RSC API might allow for the equivalent of + // the allReady Promise that exists on SSR streams. + // + // If something errored or rejected anywhere in the render, we close + // the stream as errored. This lets a CacheHandler choose to save the + // partial result up until that point for future hits for a while to avoid + // unnecessary retries or not to retry. We use the end of the stream for + // this to avoid another complicated side-channel. 
A receiver has to consider + // that the stream might also error for other reasons anyway such as losing + // connection. + + const buffer: any[] = [] + const reader = savedStream.getReader() + for (let entry; !(entry = await reader.read()).done; ) { + buffer.push(entry.value) + } + + let idx = 0 + const bufferStream = new ReadableStream({ + pull(controller) { + if (idx < buffer.length) { + controller.enqueue(buffer[idx++]) + } else if (errors.length > 0) { + // TODO: Should we use AggregateError here? + controller.error(errors[0]) + } else { + controller.close() + } + }, + }) + + const collectedTags = innerCacheStore.tags + // If cacheLife() was used to set an explicit revalidate time we use that. + // Otherwise, we use the lowest of all inner fetch()/unstable_cache() or nested "use cache". + // If they're lower than our default. + const collectedRevalidate = + innerCacheStore.explicitRevalidate !== undefined + ? innerCacheStore.explicitRevalidate + : innerCacheStore.revalidate + + const entry = { + value: bufferStream, + stale: false, // TODO: rm + tags: collectedTags === null ? [] : collectedTags, + revalidate: collectedRevalidate, + } + // Propagate tags/revalidate to the parent context. + propagateCacheLifeAndTags(outerWorkUnitStore, entry) + return entry +} + async function generateCacheEntryImpl( workStore: WorkStore, + outerWorkUnitStore: WorkUnitStore | undefined, + innerCacheStore: UseCacheStore, clientReferenceManifest: DeepReadonly, cacheHandler: CacheHandler, serializedCacheKey: string | ArrayBuffer, @@ -174,8 +277,7 @@ async function generateCacheEntryImpl( // Invoke the inner function to load a new result. const result = fn.apply(null, args) - let didError = false - let firstError: any = null + let errors: Array = [] const stream = renderToReadableStream( result, @@ -183,52 +285,28 @@ async function generateCacheEntryImpl( { environmentName: 'Cache', temporaryReferences, - onError(error: any) { + onError(error: unknown) { // Report the error. 
console.error(error) - if (!didError) { - didError = true - firstError = error - } + errors.push(error) }, } ) const [returnStream, savedStream] = stream.tee() - // We create a stream that passed through the RSC render of the response. - // It always runs to completion but at the very end, if something errored - // or rejected anywhere in the render. We close the stream as errored. - // This lets a CacheHandler choose to save the errored result for future - // hits for a while to avoid unnecessary retries or not to retry. - // We use the end of the stream for this to avoid another complicated - // side-channel. A receiver has to consider that the stream might also - // error for other reasons anyway such as losing connection. - const reader = savedStream.getReader() - const erroringSavedStream = new ReadableStream({ - pull(controller) { - return reader.read().then(({ done, value }) => { - if (done) { - if (didError) { - controller.error(firstError) - } else { - controller.close() - } - return - } - controller.enqueue(value) - }) - }, - cancel(reason: any) { - reader.cancel(reason) - }, - }) + const cacheEntry = collectResult( + savedStream, + outerWorkUnitStore, + innerCacheStore, + errors + ) if (!workStore.pendingRevalidateWrites) { workStore.pendingRevalidateWrites = [] } - const promise = cacheHandler.set(serializedCacheKey, erroringSavedStream) + const promise = cacheHandler.set(serializedCacheKey, cacheEntry) workStore.pendingRevalidateWrites.push(promise) @@ -258,6 +336,8 @@ export function cache(kind: string, id: string, fn: any) { ) } + const workUnitStore = workUnitAsyncStorage.getStore() + // Because the Action ID is not yet unique per implementation of that Action we can't // safely reuse the results across builds yet. In the meantime we add the buildId to the // arguments as a seed to ensure they're not reused. 
Remove this once Action IDs hash @@ -309,6 +389,7 @@ export function cache(kind: string, id: string, fn: any) { stream = await generateCacheEntry( workStore, + workUnitStore, clientReferenceManifestSingleton, cacheHandler, serializedCacheKey, @@ -317,11 +398,15 @@ export function cache(kind: string, id: string, fn: any) { ) } else { stream = entry.value + + propagateCacheLifeAndTags(workUnitStore, entry) + if (entry.stale) { // If this is stale, and we're not in a prerender (i.e. this is dynamic render), // then we should warm up the cache with a fresh revalidated entry. const ignoredStream = await generateCacheEntry( workStore, + workUnitStore, clientReferenceManifestSingleton, cacheHandler, serializedCacheKey, @@ -350,6 +435,7 @@ export function cache(kind: string, id: string, fn: any) { ? clientReferenceManifestSingleton.edgeRscModuleMapping : clientReferenceManifestSingleton.rscModuleMapping, } + return createFromReadableStream(stream, { ssrManifest, temporaryReferences, From 58ab93b1fb794a7656bc7451e8de28e818d0acf1 Mon Sep 17 00:00:00 2001 From: Benjamin Woodruff Date: Thu, 10 Oct 2024 10:31:21 -0700 Subject: [PATCH 6/8] chore(turbo-tasks-backend): Clean up internal macros (#71061) Figured it was easier to fix these myself than to try to suggest changes on #70798: - Fix some inconsistency inside `iter_many` about how derefing happens depending on how arguments are used. - Simplify `get_many`'s argument passthrough to `iter_many` using a `tt` group: https://veykril.github.io/tlborm/decl-macros/patterns/tt-bundling.html - Manually line-wrap macros, which rustfmt does not do. - Rename some of `iter_many` arguments (the name "`value`" was a little too overloaded here) and add some really basic documentation. 
- Eliminate a few branches using `?` repetitions instead: https://veykril.github.io/tlborm/decl-macros/macros-methodical.html#repetitions - The `value_pattern` (previously `value_ident`) of `iter_many` can technically be any pattern (even though nothing currently needs that), so broaden the type from `ident` to `tt`. --- .../turbo-tasks-backend/src/backend/mod.rs | 20 ++++- .../backend/operation/aggregation_update.rs | 20 ++++- .../src/backend/storage.rs | 74 ++++++++----------- 3 files changed, 62 insertions(+), 52 deletions(-) diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs index b7f6199ca0299..2d3b594ba1fa3 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs @@ -1313,14 +1313,26 @@ impl TurboTasksBackendInner { ); task = ctx.task(task_id, TaskDataCategory::All); } - for collectible in iter_many!(task, AggregatedCollectible { collectible } count if collectible.collectible_type == collectible_type && count > 0 => collectible.cell) - { + for collectible in iter_many!( + task, + AggregatedCollectible { + collectible + } count if collectible.collectible_type == collectible_type && count > 0 => { + collectible.cell + } + ) { *collectibles .entry(RawVc::TaskCell(collectible.task, collectible.cell)) .or_insert(0) += 1; } - for (collectible, count) in iter_many!(task, Collectible { collectible } count if collectible.collectible_type == collectible_type => (collectible.cell, count)) - { + for (collectible, count) in iter_many!( + task, + Collectible { + collectible + } count if collectible.collectible_type == collectible_type => { + (collectible.cell, count) + } + ) { *collectibles .entry(RawVc::TaskCell(collectible.task, collectible.cell)) .or_insert(0) += count; diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs 
b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs index a6b2b05664bdb..5e1a6133de45e 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs @@ -133,8 +133,14 @@ impl AggregatedDataUpdate { if dirty_container_count > 0 { dirty = true; } - for collectible in iter_many!(task, AggregatedCollectible { collectible } count if count > 0 => collectible) - { + for collectible in iter_many!( + task, + AggregatedCollectible { + collectible + } count if count > 0 => { + collectible + } + ) { collectibles_update.push((collectible, 1)); } } @@ -249,7 +255,15 @@ impl AggregatedDataUpdate { ); if added || removed { let ty = collectible.collectible_type; - let dependent: SmallVec<[TaskId; 4]> = get_many!(task, CollectiblesDependent { collectible_type, task } if *collectible_type == ty => *task); + let dependent: SmallVec<[TaskId; 4]> = get_many!( + task, + CollectiblesDependent { + collectible_type, + task, + } if collectible_type == ty => { + task + } + ); if !dependent.is_empty() { queue.push(AggregationUpdateJob::Invalidate { task_ids: dependent, diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs index bf7aa5360663f..64025dad05524 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs @@ -396,7 +396,9 @@ where macro_rules! get { ($task:ident, $key:ident $input:tt) => { - if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.get(&$crate::data::CachedDataItemKey::$key $input).as_ref() { + if let Some($crate::data::CachedDataItemValue::$key { + value, + }) = $task.get(&$crate::data::CachedDataItemKey::$key $input).as_ref() { Some(value) } else { None @@ -409,7 +411,9 @@ macro_rules! get { macro_rules! 
get_mut { ($task:ident, $key:ident $input:tt) => { - if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.get_mut(&$crate::data::CachedDataItemKey::$key $input).as_mut() { + if let Some($crate::data::CachedDataItemValue::$key { + value, + }) = $task.get_mut(&$crate::data::CachedDataItemKey::$key $input).as_mut() { let () = $crate::data::allow_mut_access::$key; Some(value) } else { @@ -421,64 +425,42 @@ macro_rules! get_mut { }; } +/// Creates an iterator over all [`CachedDataItemKey::$key`][crate::data::CachedDataItemKey]s in +/// `$task` matching the given `$key_pattern`, optional `$value_pattern`, and optional `if $cond`. +/// +/// Each element in the iterator is determined by `$iter_item`, which may use fields extracted by +/// `$key_pattern` or `$value_pattern`. macro_rules! iter_many { - ($task:ident, $key:ident $input:tt => $value:expr) => { - $task - .iter($crate::data::indicies::$key) - .filter_map(|(key, _)| match *key { - $crate::data::CachedDataItemKey::$key $input => Some($value), - _ => None, - }) - }; - ($task:ident, $key:ident $input:tt => $value:expr) => { - $task - .iter($crate::data::indicies::$key) - .filter_map(|(key, _)| match key { - $crate::data::CachedDataItemKey::$key $input => Some($value), - _ => None, - }) - }; - ($task:ident, $key:ident $input:tt if $cond:expr => $value:expr) => { + ($task:ident, $key:ident $key_pattern:tt $(if $cond:expr)? => $iter_item:expr) => { $task .iter($crate::data::indicies::$key) .filter_map(|(key, _)| match key { - $crate::data::CachedDataItemKey::$key $input if $cond => Some($value), + &$crate::data::CachedDataItemKey::$key $key_pattern $(if $cond)? => Some( + $iter_item + ), _ => None, }) }; - ($task:ident, $key:ident $input:tt $value_ident:ident => $value:expr) => { + ($task:ident, $key:ident $input:tt $value_pattern:tt $(if $cond:expr)? 
=> $iter_item:expr) => { $task .iter($crate::data::indicies::$key) .filter_map(|(key, value)| match (key, value) { - (&$crate::data::CachedDataItemKey::$key $input, &$crate::data::CachedDataItemValue::$key { value: $value_ident }) => Some($value), - _ => None, - }) - }; - ($task:ident, $key:ident $input:tt $value_ident:ident if $cond:expr => $value:expr) => { - $task - .iter($crate::data::indicies::$key) - .filter_map(|(key, value)| match (key, value) { - (&$crate::data::CachedDataItemKey::$key $input, &$crate::data::CachedDataItemValue::$key { value: $value_ident }) if $cond => Some($value), + ( + &$crate::data::CachedDataItemKey::$key $input, + &$crate::data::CachedDataItemValue::$key { value: $value_pattern } + ) $(if $cond)? => Some($iter_item), _ => None, }) }; } +/// A thin wrapper around [`iter_many`] that calls [`Iterator::collect`]. +/// +/// Note that the return type of [`Iterator::collect`] may be ambiguous in certain contexts, so +/// using this macro may require explicit type annotations on variables. macro_rules! get_many { - ($task:ident, $key:ident $input:tt => $value:expr) => { - $crate::backend::storage::iter_many!($task, $key $input => $value).collect() - }; - ($task:ident, $key:ident $input:tt => $value:expr) => { - $crate::backend::storage::iter_many!($task, $key $input => $value).collect() - }; - ($task:ident, $key:ident $input:tt if $cond:expr => $value:expr) => { - $crate::backend::storage::iter_many!($task, $key $input if $cond => $value).collect() - }; - ($task:ident, $key:ident $input:tt $value_ident:ident => $value:expr) => { - $crate::backend::storage::iter_many!($task, $key $input $value_ident => $value).collect() - }; - ($task:ident, $key:ident $input:tt $value_ident:ident if $cond:expr => $value:expr) => { - $crate::backend::storage::iter_many!($task, $key $input $value_ident if $cond => $value).collect() + ($($args:tt)*) => { + $crate::backend::storage::iter_many!($($args)*).collect() }; } @@ -529,7 +511,9 @@ macro_rules! 
update_count { macro_rules! remove { ($task:ident, $key:ident $input:tt) => { - if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.remove(&$crate::data::CachedDataItemKey::$key $input) { + if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.remove( + &$crate::data::CachedDataItemKey::$key $input + ) { Some(value) } else { None From b8db5e2e7c8847fcb137da7dfd24b52b2b6a87ee Mon Sep 17 00:00:00 2001 From: Tobias Koppers Date: Thu, 10 Oct 2024 20:27:33 +0200 Subject: [PATCH 7/8] change default module id strategy back to dev (#71052) ### What? Disable deterministic module ids by default until the performance issue is solved. --- crates/next-api/src/project.rs | 2 +- test/integration/module-id-strategies/next.config.js | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/next-api/src/project.rs b/crates/next-api/src/project.rs index 0a91d003814ac..bee43fb477d4f 100644 --- a/crates/next-api/src/project.rs +++ b/crates/next-api/src/project.rs @@ -1281,7 +1281,7 @@ impl Project { } None => match *self.next_mode().await? { NextMode::Development => Ok(Vc::upcast(DevModuleIdStrategy::new())), - NextMode::Build => Ok(Vc::upcast(GlobalModuleIdStrategyBuilder::build(self))), + NextMode::Build => Ok(Vc::upcast(DevModuleIdStrategy::new())), }, } } diff --git a/test/integration/module-id-strategies/next.config.js b/test/integration/module-id-strategies/next.config.js index 40c2e5b7e65a1..c535948b13b6f 100644 --- a/test/integration/module-id-strategies/next.config.js +++ b/test/integration/module-id-strategies/next.config.js @@ -1,4 +1,10 @@ module.exports = { bundlePagesRouterDependencies: true, serverExternalPackages: ['opted-out-external-package'], + experimental: { + turbo: { + moduleIdStrategy: + process.env.NODE_ENV === 'production' ? 
'deterministic' : undefined, + }, + }, } From 5bd42716c69503e7361eba43151e85124a960056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EB=A3=A8=EB=B0=80LuMir?= Date: Fri, 11 Oct 2024 03:33:10 +0900 Subject: [PATCH 8/8] docs(javascript): fix wrong javascript code in `11-parallel-routes.mdx` (#71074) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 📚 Improving Documentation Hello, I've fixed an wrong JavaScript code in [`11-parallel-routes.mdx`](https://nextjs.org/docs/app/building-your-application/routing/parallel-routes). There's no need to use type declaration in JavaScript code. So I deleted it. --------- Co-authored-by: Zack Tanner <1939140+ztanner@users.noreply.github.com> --- .../01-routing/11-parallel-routes.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/02-app/01-building-your-application/01-routing/11-parallel-routes.mdx b/docs/02-app/01-building-your-application/01-routing/11-parallel-routes.mdx index a706751c7c3f0..755adbe87755e 100644 --- a/docs/02-app/01-building-your-application/01-routing/11-parallel-routes.mdx +++ b/docs/02-app/01-building-your-application/01-routing/11-parallel-routes.mdx @@ -202,7 +202,7 @@ export default function Layout({ children }: { children: React.ReactNode }) { ```jsx filename="app/@analytics/layout.js" switcher import Link from 'next/link' -export default function Layout({ children }: { children: React.ReactNode }) { +export default function Layout({ children }) { return ( <>