From 40bc463242a48fcefa83b578d9429015692e6760 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Wed, 18 Mar 2026 14:40:19 -0700 Subject: [PATCH 01/14] Subscription model refactor + async subscribe_track - Add `ordered` and `max_latency` fields to `Track` struct - Create `Subscription` struct for per-consumer preferences - Create `TrackSubscription` type with RAII lifecycle (auto-removes on drop) - Add `TrackProducer::poll_max` for aggregate subscription change detection - Add `TrackConsumer::subscribe()` to register subscriptions - Add `GroupProducer::is_aborted()` for duplicate group handling - Allow `create_group` to replace aborted groups (for subscription restart) - Replace `BroadcastDynamic::requested_track() -> TrackProducer` with `requested_track() -> TrackRequest` (handler creates producer, responds) - Make `BroadcastConsumer::subscribe_track` async with request/response - Add `BroadcastDynamic::insert_track` for handler-side track insertion - Add dedup for pending track requests (multiple subscribers share reply) - Update all callers across the workspace Known issues: - libmoq FFI tests deadlock (likely BroadcastConsumer upgrade in async context) - moq-mux tests may deadlock similarly Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/hang/examples/subscribe.rs | 6 +- rs/hang/examples/video.rs | 2 + rs/hang/src/catalog/audio/mod.rs | 7 +- rs/hang/src/catalog/root.rs | 2 + rs/hang/src/catalog/video/mod.rs | 7 +- rs/libmoq/src/consume.rs | 39 ++- rs/moq-clock/src/main.rs | 4 +- rs/moq-ffi/src/consumer.rs | 12 +- rs/moq-lite/src/ietf/publisher.rs | 7 +- rs/moq-lite/src/ietf/subscriber.rs | 30 +- rs/moq-lite/src/lite/publisher.rs | 13 +- rs/moq-lite/src/lite/subscriber.rs | 44 ++- rs/moq-lite/src/model/broadcast.rs | 436 ++++++++++++++++++--------- rs/moq-lite/src/model/group.rs | 5 + rs/moq-lite/src/model/track.rs | 468 +++++++++++++++++++++++++++++ rs/moq-mux/src/catalog.rs | 2 + rs/moq-native/examples/chat.rs | 5 +- rs/moq-native/tests/backend.rs | 2 + 
rs/moq-native/tests/broadcast.rs | 3 + rs/moq-relay/src/web.rs | 4 +- 20 files changed, 896 insertions(+), 202 deletions(-) diff --git a/rs/hang/examples/subscribe.rs b/rs/hang/examples/subscribe.rs index f48c079a3..ea8c4d6ed 100644 --- a/rs/hang/examples/subscribe.rs +++ b/rs/hang/examples/subscribe.rs @@ -49,7 +49,7 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result tracing::info!(%path, "broadcast announced"); // Read the catalog to discover available tracks. - let catalog_track = broadcast.subscribe_track(&hang::Catalog::default_track())?; + let catalog_track = broadcast.subscribe_track(&hang::Catalog::default_track()).await?; let mut catalog = hang::CatalogConsumer::new(catalog_track); let info = catalog.next().await?.ok_or_else(|| anyhow::anyhow!("no catalog"))?; @@ -74,9 +74,11 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result let track = moq_lite::Track { name: name.clone(), priority: 1, + ordered: false, + max_latency: std::time::Duration::ZERO, }; - let track_consumer = broadcast.subscribe_track(&track)?; + let track_consumer = broadcast.subscribe_track(&track).await?; let mut ordered = hang::container::OrderedConsumer::new(track_consumer, Duration::from_millis(500)); // Read frames in presentation order. 
diff --git a/rs/hang/examples/video.rs b/rs/hang/examples/video.rs index b25c87135..c0c93cfca 100644 --- a/rs/hang/examples/video.rs +++ b/rs/hang/examples/video.rs @@ -43,6 +43,8 @@ fn create_track(broadcast: &mut moq_lite::BroadcastProducer) -> anyhow::Result Result { let broadcast = self.broadcast.get(broadcast).ok_or(Error::BroadcastNotFound)?.clone(); - let catalog = broadcast.subscribe_track(&hang::catalog::Catalog::default_track())?; let channel = oneshot::channel(); let entry = TaskEntry { @@ -58,8 +57,14 @@ impl Consume { let id = self.catalog_task.insert(Some(entry))?; tokio::spawn(async move { + let res = async { + let catalog = broadcast + .subscribe_track(&hang::catalog::Catalog::default_track()) + .await?; + Self::run_catalog(id, broadcast, catalog.into()).await + }; let res = tokio::select! { - res = Self::run_catalog(id, broadcast, catalog.into()) => res, + res = res => res, _ = channel.1 => Ok(()), }; @@ -217,11 +222,13 @@ impl Consume { .nth(index) .ok_or(Error::NoIndex)?; - let track = consume.broadcast.subscribe_track(&moq_lite::Track { + let broadcast = consume.broadcast.clone(); + let track_info = moq_lite::Track { name: rendition.clone(), priority: 1, // TODO: Remove priority - })?; - let track = hang::container::OrderedConsumer::new(track, latency); + ordered: false, + max_latency: std::time::Duration::ZERO, + }; let channel = oneshot::channel(); let entry = TaskEntry { @@ -231,8 +238,13 @@ impl Consume { let id = self.track_task.insert(Some(entry))?; tokio::spawn(async move { + let res = async { + let track = broadcast.subscribe_track(&track_info).await?; + let track = hang::container::OrderedConsumer::new(track, latency); + Self::run_track(id, track).await + }; let res = tokio::select! 
{ - res = Self::run_track(id, track) => res, + res = res => res, _ = channel.1 => Ok(()), }; @@ -261,11 +273,13 @@ impl Consume { .nth(index) .ok_or(Error::NoIndex)?; - let track = consume.broadcast.subscribe_track(&moq_lite::Track { + let broadcast = consume.broadcast.clone(); + let track_info = moq_lite::Track { name: rendition.clone(), priority: 2, // TODO: Remove priority - })?; - let track = hang::container::OrderedConsumer::new(track, latency); + ordered: false, + max_latency: std::time::Duration::ZERO, + }; let channel = oneshot::channel(); let entry = TaskEntry { @@ -275,8 +289,13 @@ impl Consume { let id = self.track_task.insert(Some(entry))?; tokio::spawn(async move { + let res = async { + let track = broadcast.subscribe_track(&track_info).await?; + let track = hang::container::OrderedConsumer::new(track, latency); + Self::run_track(id, track).await + }; let res = tokio::select! { - res = Self::run_track(id, track) => res, + res = res => res, _ = channel.1 => Ok(()), }; diff --git a/rs/moq-clock/src/main.rs b/rs/moq-clock/src/main.rs index 1ebb5f21c..972034b59 100644 --- a/rs/moq-clock/src/main.rs +++ b/rs/moq-clock/src/main.rs @@ -56,6 +56,8 @@ async fn main() -> anyhow::Result<()> { let track = Track { name: config.track, priority: 0, + ordered: false, + max_latency: std::time::Duration::ZERO, }; let origin = moq_lite::Origin::produce(); @@ -97,7 +99,7 @@ async fn main() -> anyhow::Result<()> { Some(announce) = origin.announced() => match announce { (path, Some(broadcast)) => { tracing::info!(broadcast = %path, "broadcast is online, subscribing to track"); - let track = broadcast.subscribe_track(&track)?; + let track = broadcast.subscribe_track(&track).await?; clock = Some(clock::Subscriber::new(track)); } (path, None) => { diff --git a/rs/moq-ffi/src/consumer.rs b/rs/moq-ffi/src/consumer.rs index 9ada59c5c..023162cd8 100644 --- a/rs/moq-ffi/src/consumer.rs +++ b/rs/moq-ffi/src/consumer.rs @@ -75,8 +75,8 @@ impl Media { impl MoqBroadcastConsumer { /// 
Subscribe to the catalog for this broadcast. pub fn subscribe_catalog(&self) -> Result, MoqError> { - let _guard = crate::ffi::RUNTIME.enter(); - let track = self.inner.subscribe_track(&hang::catalog::Catalog::default_track())?; + let track = + crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&hang::catalog::Catalog::default_track()))?; let consumer = hang::CatalogConsumer::from(track); Ok(Arc::new(MoqCatalogConsumer { task: Task::new(Catalog { inner: consumer }), @@ -87,8 +87,12 @@ impl MoqBroadcastConsumer { /// /// `max_latency_ms` controls the maximum buffering before skipping a GoP. pub fn subscribe_media(&self, name: String, max_latency_ms: u64) -> Result, MoqError> { - let _guard = crate::ffi::RUNTIME.enter(); - let track = self.inner.subscribe_track(&moq_lite::Track { name, priority: 0 })?; + let track = crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&moq_lite::Track { + name, + priority: 0, + ordered: false, + max_latency: std::time::Duration::ZERO, + }))?; let latency = std::time::Duration::from_millis(max_latency_ms); let consumer = hang::container::OrderedConsumer::new(track, latency); Ok(Arc::new(MoqMediaConsumer { diff --git a/rs/moq-lite/src/ietf/publisher.rs b/rs/moq-lite/src/ietf/publisher.rs index 438c410ae..e204c089e 100644 --- a/rs/moq-lite/src/ietf/publisher.rs +++ b/rs/moq-lite/src/ietf/publisher.rs @@ -111,12 +111,9 @@ impl Publisher { return Ok(()); }; - let track = Track { - name: msg.track_name.to_string(), - priority: msg.subscriber_priority, - }; + let track = Track::new(msg.track_name.to_string()); - let track = match broadcast.subscribe_track(&track) { + let track = match broadcast.subscribe_track(&track).await { Ok(track) => track, Err(err) => { self.write_subscribe_error(&mut stream.writer, request_id, 404, &err.to_string()) diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index a75e360e8..1e4f229f4 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ 
b/rs/moq-lite/src/ietf/subscriber.rs @@ -463,11 +463,7 @@ impl Subscriber { fn start_publish(&mut self, msg: &ietf::Publish<'_>) -> Result<(), Error> { let request_id = msg.request_id; - let track = Track { - name: msg.track_name.to_string(), - priority: 0, - } - .produce(); + let track = Track::new(msg.track_name.to_string()).produce(); let mut state = self.state.lock(); match state.subscribes.entry(request_id) { @@ -500,9 +496,9 @@ impl Subscriber { async fn run_broadcast(&self, path: Path<'_>, mut broadcast: BroadcastDynamic) -> Result<(), Error> { loop { - let track = tokio::select! { - producer = broadcast.requested_track() => match producer { - Ok(producer) => producer, + let request = tokio::select! { + request = broadcast.requested_track() => match request { + Ok(request) => request, Err(err) => { tracing::debug!(%err, "broadcast closed"); break; @@ -514,15 +510,29 @@ impl Subscriber { let mut this = self.clone(); let path = path.to_owned(); + let broadcast = broadcast.clone(); web_async::spawn(async move { - this.run_subscribe(path, track).await; + this.run_subscribe(path, request, broadcast).await; }); } Ok(()) } - async fn run_subscribe(&mut self, broadcast: Path<'_>, mut track: TrackProducer) { + async fn run_subscribe( + &mut self, + broadcast_path: Path<'_>, + request: crate::TrackRequest, + broadcast: BroadcastDynamic, + ) { + // Create the TrackProducer and insert into the broadcast lookup. 
+ let mut track = request.info.clone().produce(); + if let Err(err) = broadcast.insert_track(&track) { + request.reject(err); + return; + } + request.respond(track.consume()); + let broadcast = broadcast_path; let request_id = match self.control.next_request_id().await { Ok(id) => id, Err(err) => { diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index 53beb4688..e7544b4c9 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -260,20 +260,15 @@ impl Publisher { priority: PriorityQueue, version: Version, ) -> Result<(), Error> { - let track = Track { - name: subscribe.track.to_string(), - priority: subscribe.priority, - }; + let track = Track::new(subscribe.track.to_string()); let broadcast = consumer.ok_or(Error::NotFound)?; - let track = broadcast.subscribe_track(&track)?; - - // TODO wait until track.info() to get the *real* priority + let track = broadcast.subscribe_track(&track).await?; let info = lite::SubscribeOk { priority: track.info.priority, - ordered: false, - max_latency: std::time::Duration::ZERO, + ordered: track.info.ordered, + max_latency: track.info.max_latency, start_group: None, end_group: None, }; diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index 73534b1f3..290b35703 100644 --- a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -155,9 +155,9 @@ impl Subscriber { loop { // Keep serving requests until there are no more consumers. // This way we'll clean up the task when the broadcast is no longer needed. - let track = tokio::select! { - producer = broadcast.requested_track() => match producer { - Ok(producer) => producer, + let request = tokio::select! 
{ + request = broadcast.requested_track() => match request { + Ok(request) => request, Err(err) => { tracing::debug!(%err, "broadcast closed"); break; @@ -170,28 +170,48 @@ impl Subscriber { let mut this = self.clone(); let path = path.clone(); + let broadcast = broadcast.clone(); web_async::spawn(async move { - this.run_subscribe(id, path, track).await; + this.run_subscribe(id, path, request, broadcast).await; this.subscribes.lock().remove(&id); }); } } - async fn run_subscribe(&mut self, id: u64, broadcast: Path<'_>, mut track: TrackProducer) { + async fn run_subscribe( + &mut self, + id: u64, + broadcast_path: Path<'_>, + request: crate::TrackRequest, + broadcast: BroadcastDynamic, + ) { + let track_name = request.info.name.clone(); + + // Create the TrackProducer and insert into the broadcast lookup. + let mut track = request.info.clone().produce(); + if let Err(err) = broadcast.insert_track(&track) { + tracing::warn!(%err, track = %track_name, "failed to insert track"); + request.reject(err); + return; + } + + // Respond to the waiting subscribers with the consumer. + request.respond(track.consume()); + self.subscribes.lock().insert(id, track.clone()); let msg = lite::Subscribe { id, - broadcast: broadcast.to_owned(), + broadcast: broadcast_path.to_owned(), track: (&track.info.name).into(), priority: track.info.priority, - ordered: true, - max_latency: std::time::Duration::ZERO, + ordered: track.info.ordered, + max_latency: track.info.max_latency, start_group: None, end_group: None, }; - tracing::info!(id, broadcast = %self.log_path(&broadcast), track = %track.info.name, "subscribe started"); + tracing::info!(id, broadcast = %self.log_path(&broadcast_path), track = %track_name, "subscribe started"); let res = tokio::select! 
{ _ = track.unused() => Err(Error::Cancel), @@ -200,15 +220,15 @@ impl Subscriber { match res { Err(Error::Cancel) => { - tracing::info!(id, broadcast = %self.log_path(&broadcast), track = %track.info.name, "subscribe cancelled"); + tracing::info!(id, broadcast = %self.log_path(&broadcast_path), track = %track_name, "subscribe cancelled"); let _ = track.abort(Error::Cancel); } Err(err) => { - tracing::warn!(id, broadcast = %self.log_path(&broadcast), track = %track.info.name, %err, "subscribe error"); + tracing::warn!(id, broadcast = %self.log_path(&broadcast_path), track = %track_name, %err, "subscribe error"); let _ = track.abort(err); } _ => { - tracing::info!(id, broadcast = %self.log_path(&broadcast), track = %track.info.name, "subscribe complete"); + tracing::info!(id, broadcast = %self.log_path(&broadcast_path), track = %track_name, "subscribe complete"); let _ = track.finish(); } } diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index f1d93c73c..979f055cc 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -34,13 +34,47 @@ impl Broadcast { } } +/// A pending track request queued by [`BroadcastConsumer::subscribe_track`]. +/// +/// The dynamic handler receives this via [`BroadcastDynamic::requested_track`], +/// creates the track, inserts it, and responds. +pub struct TrackRequest { + pub info: Track, + replies: Vec>>, +} + +impl TrackRequest { + /// Respond to all waiting subscribers with the given consumer. + pub fn respond(mut self, consumer: TrackConsumer) { + for tx in self.replies.drain(..) { + let _ = tx.send(Ok(consumer.clone())); + } + } + + /// Reject all waiting subscribers with the given error. + pub fn reject(mut self, err: Error) { + for tx in self.replies.drain(..) { + let _ = tx.send(Err(err.clone())); + } + } +} + +impl Drop for TrackRequest { + fn drop(&mut self) { + // If the request is dropped without responding, reject with Cancel. 
+ for tx in self.replies.drain(..) { + let _ = tx.send(Err(Error::Cancel)); + } + } +} + #[derive(Default, Clone)] struct State { // Weak references for deduplication. Doesn't prevent track auto-close. tracks: HashMap, - // Dynamic tracks that have been requested. - requests: Vec, + // Dynamic tracks that have been requested but not yet handled. + requests: Vec, // The current number of dynamic producers. // If this is 0, requests must be empty. @@ -50,6 +84,21 @@ struct State { abort: Option, } +// TrackRequest holds oneshot senders, which are not Clone, while State derives Clone +// (required by conducer). We therefore implement Clone for TrackRequest manually, +// dropping the reply senders: a cloned request keeps the track info but carries no +// waiting subscribers, because oneshot senders cannot be duplicated. +impl Clone for TrackRequest { + fn clone(&self) -> Self { + // Requests can't actually be meaningfully cloned; the senders are consumed. + // This is only needed because State derives Clone (for conducer). + Self { + info: self.info.clone(), + replies: Vec::new(), + } + } +} + fn modify(state: &conducer::Producer) -> Result, Error> { match state.write() { Ok(state) => Ok(state), @@ -85,15 +134,7 @@ impl BroadcastProducer { /// /// NOTE: You probably want to [TrackProducer::clone] first to keep publishing to the track. pub fn insert_track(&mut self, track: &TrackProducer) -> Result<(), Error> { - let mut state = modify(&self.state)?; - - let hash_map::Entry::Vacant(entry) = state.tracks.entry(track.info.name.clone()) else { - return Err(Error::Duplicate); - }; - - entry.insert(track.weak()); - - Ok(()) + insert_track_impl(&self.state, track) } /// Remove a track from the lookup. @@ -132,9 +173,9 @@ impl BroadcastProducer { weak.abort(err.clone()); } - // Abort any pending dynamic track requests. - for mut request in guard.requests.drain(..) { - request.abort(err.clone()).ok(); + // Reject any pending dynamic track requests. + for request in guard.requests.drain(..) 
{ + request.reject(err.clone()); } guard.abort = Some(err); @@ -167,11 +208,51 @@ impl BroadcastProducer { } } +/// Shared helper to insert a track into the broadcast lookup. +fn insert_track_impl(state: &conducer::Producer, track: &TrackProducer) -> Result<(), Error> { + let mut guard = modify(state)?; + + match guard.tracks.entry(track.info.name.clone()) { + hash_map::Entry::Occupied(mut entry) => { + if !entry.get().is_closed() { + return Err(Error::Duplicate); + } + entry.insert(track.weak()); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(track.weak()); + } + } + + // Spawn cleanup task to remove the track from the lookup when unused. + let weak = track.weak(); + let consumer_state = state.consume(); + web_async::spawn(async move { + let _ = weak.unused().await; + + let Some(producer) = consumer_state.produce() else { + return; + }; + let Ok(mut state) = producer.write() else { + return; + }; + + // Remove the entry, but reinsert if it was replaced by a different reference. + if let Some(current) = state.tracks.remove(&weak.info.name) + && !current.is_clone(&weak) + { + state.tracks.insert(current.info.name.clone(), current); + } + }); + + Ok(()) +} + /// Handles on-demand track creation for a broadcast. /// -/// When a consumer requests a track that doesn't exist, a [TrackProducer] is created -/// and queued for the dynamic producer to fulfill via [Self::requested_track]. -/// Dropped when no longer needed; pending requests are automatically aborted. +/// When a consumer requests a track that doesn't exist, a [`TrackRequest`] is queued +/// for the dynamic producer to fulfill via [`Self::requested_track`]. +/// Dropped when no longer needed; pending requests are automatically rejected. 
#[derive(Clone)] pub struct BroadcastDynamic { info: Broadcast, @@ -199,18 +280,29 @@ impl BroadcastDynamic { }) } - pub fn poll_requested_track(&mut self, waiter: &conducer::Waiter) -> Poll> { + pub fn poll_requested_track(&mut self, waiter: &conducer::Waiter) -> Poll> { self.poll(waiter, |state| match state.requests.pop() { - Some(producer) => Poll::Ready(producer), + Some(request) => Poll::Ready(request), None => Poll::Pending, }) } - /// Block until a consumer requests a track, returning its producer. - pub async fn requested_track(&mut self) -> Result { + /// Block until a consumer requests a track, returning the request. + /// + /// The handler should create a [`TrackProducer`], insert it via [`Self::insert_track`], + /// and then call [`TrackRequest::respond`] with the consumer. + pub async fn requested_track(&mut self) -> Result { conducer::wait(|waiter| self.poll_requested_track(waiter)).await } + /// Insert a track into the broadcast lookup. + /// + /// Call this before responding to a [`TrackRequest`] so that subsequent + /// [`BroadcastConsumer::subscribe_track`] calls find the track immediately. + pub fn insert_track(&self, track: &TrackProducer) -> Result<(), Error> { + insert_track_impl(&self.state, track) + } + /// Create a consumer that can subscribe to tracks in this broadcast. pub fn consume(&self) -> BroadcastConsumer { BroadcastConsumer { @@ -228,9 +320,9 @@ impl BroadcastDynamic { weak.abort(err.clone()); } - // Abort any pending dynamic track requests. - for mut request in guard.requests.drain(..) { - request.abort(err.clone()).ok(); + // Reject any pending dynamic track requests. + for request in guard.requests.drain(..) { + request.reject(err.clone()); } guard.abort = Some(err); @@ -253,9 +345,9 @@ impl Drop for BroadcastDynamic { return; } - // Abort all pending requests since there's no dynamic producer to handle them. - for mut request in state.requests.drain(..) 
{ - request.abort(Error::Cancel).ok(); + // Reject all pending requests since there's no dynamic producer to handle them. + for request in state.requests.drain(..) { + request.reject(Error::Cancel); } } } @@ -266,7 +358,7 @@ use futures::FutureExt; #[cfg(test)] impl BroadcastDynamic { - pub fn assert_request(&mut self) -> TrackProducer { + pub fn assert_request(&mut self) -> TrackRequest { self.requested_track() .now_or_never() .expect("should not have blocked") @@ -294,56 +386,48 @@ impl Deref for BroadcastConsumer { } impl BroadcastConsumer { - pub fn subscribe_track(&self, track: &Track) -> Result { - // Upgrade to a temporary producer so we can modify the state. - let producer = self - .state - .produce() - .ok_or_else(|| self.state.read().abort.clone().unwrap_or(Error::Dropped))?; - let mut state = modify(&producer)?; - - if let Some(weak) = state.tracks.get(&track.name) { - if !weak.is_closed() { - return Ok(weak.consume()); + /// Subscribe to a track by name. + /// + /// If the track already exists in the lookup and is still active, returns its consumer + /// immediately. Otherwise, queues a [`TrackRequest`] for the dynamic handler and awaits + /// its response. + pub async fn subscribe_track(&self, track: &Track) -> Result { + let rx = { + // Upgrade to a temporary producer so we can modify the state. + let producer = self + .state + .produce() + .ok_or_else(|| self.state.read().abort.clone().unwrap_or(Error::Dropped))?; + let mut state = modify(&producer)?; + + if let Some(weak) = state.tracks.get(&track.name) { + if !weak.is_closed() { + return Ok(weak.consume()); + } + // Remove the stale entry + state.tracks.remove(&track.name); } - // Remove the stale entry - state.tracks.remove(&track.name); - } - // Otherwise we have never seen this track before and need to create a new producer. 
- let producer = track.clone().produce(); - let consumer = producer.consume(); - - if state.dynamic == 0 { - return Err(Error::NotFound); - } - - // Insert a weak reference for deduplication. - let weak = producer.weak(); - state.tracks.insert(producer.info.name.clone(), weak.clone()); - state.requests.push(producer); - - // Remove the track from the lookup when it's unused. - let consumer_state = self.state.clone(); - web_async::spawn(async move { - let _ = weak.unused().await; + if state.dynamic == 0 { + return Err(Error::NotFound); + } - let Some(producer) = consumer_state.produce() else { - return; - }; - let Ok(mut state) = producer.write() else { - return; - }; + let (tx, rx) = tokio::sync::oneshot::channel(); - // Remove the entry, but reinsert if it was replaced by a different reference. - if let Some(current) = state.tracks.remove(&weak.info.name) - && !current.is_clone(&weak) - { - state.tracks.insert(current.info.name.clone(), current); + // Dedup: if there's already a pending request for this track, piggyback. + if let Some(existing) = state.requests.iter_mut().find(|r| r.info.name == track.name) { + existing.replies.push(tx); + } else { + state.requests.push(TrackRequest { + info: track.clone(), + replies: vec![tx], + }); } - }); - Ok(consumer) + rx + }; + + rx.await.map_err(|_| Error::Dropped)? 
} pub async fn closed(&self) -> Error { @@ -359,8 +443,8 @@ impl BroadcastConsumer { #[cfg(test)] impl BroadcastConsumer { - pub fn assert_subscribe_track(&self, track: &Track) -> TrackConsumer { - self.subscribe_track(track).expect("should not have errored") + pub async fn assert_subscribe_track(&self, track: &Track) -> TrackConsumer { + self.subscribe_track(track).await.expect("should not have errored") } pub fn assert_not_closed(&self) { @@ -387,14 +471,14 @@ mod test { let consumer = producer.consume(); - let mut track1_sub = consumer.assert_subscribe_track(&Track::new("track1")); + let mut track1_sub = consumer.assert_subscribe_track(&Track::new("track1")).await; track1_sub.assert_group(); let mut track2 = Track::new("track2").produce(); producer.assert_insert_track(&track2); let consumer2 = producer.consume(); - let mut track2_consumer = consumer2.assert_subscribe_track(&Track::new("track2")); + let mut track2_consumer = consumer2.assert_subscribe_track(&Track::new("track2")).await; track2_consumer.assert_no_group(); track2.append_group().unwrap(); @@ -405,133 +489,194 @@ mod test { #[tokio::test] async fn closed() { let mut producer = Broadcast::new().produce(); - let _dynamic = producer.dynamic(); + let dynamic = producer.dynamic(); let consumer = producer.consume(); consumer.assert_not_closed(); // Create a new track and insert it into the broadcast. let track1 = producer.assert_create_track(&Track::new("track1")); - let track1c = consumer.assert_subscribe_track(&track1.info); - let track2 = consumer.assert_subscribe_track(&Track::new("track2")); + let track1c = consumer.assert_subscribe_track(&track1.info).await; + + // Subscribe to a dynamic track -- this will pend until the handler responds. + let consumer2 = consumer.clone(); + let track2_handle = tokio::spawn(async move { consumer2.subscribe_track(&Track::new("track2")).await }); // Explicitly aborting the broadcast should cascade to child tracks. 
+ drop(dynamic); producer.abort(Error::Cancel).unwrap(); - // The requested TrackProducer should have been aborted. - track2.assert_error(); - - // track1 should also be closed because close() cascades. + // track1 should be closed because close() cascades. track1c.assert_error(); - - // track1's producer should also be closed. assert!(track1.is_closed()); + + // track2 should have been rejected. + let track2_result = track2_handle.await.unwrap(); + assert!(track2_result.is_err()); } #[tokio::test] async fn requests() { - let mut producer = Broadcast::new().produce().dynamic(); + let broadcast = Broadcast::new().produce(); + let mut dynamic = broadcast.dynamic(); - let consumer = producer.consume(); + let consumer = broadcast.consume(); let consumer2 = consumer.clone(); - let mut track1 = consumer.assert_subscribe_track(&Track::new("track1")); - track1.assert_not_closed(); - track1.assert_no_group(); + // Subscribe to a dynamic track -- pends until handler responds. + let consumer_clone = consumer.clone(); + let track1_handle = + tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); - // Make sure we deduplicate requests while track1 is still active. - let mut track2 = consumer2.assert_subscribe_track(&Track::new("track1")); - track2.assert_is_clone(&track1); + // Let the subscribe_track call execute and queue the request. + tokio::task::yield_now().await; - // Get the requested track, and there should only be one. - let mut track3 = producer.assert_request(); - producer.assert_no_request(); + // Get the request -- there should be exactly one. + let request = dynamic.assert_request(); + dynamic.assert_no_request(); + assert_eq!(request.info.name, "track1"); - // Make sure the consumer is the same. - track3.consume().assert_is_clone(&track1); + // Handler creates the producer, inserts it, and responds. 
+ let mut track_producer = request.info.clone().produce(); + dynamic.insert_track(&track_producer).unwrap(); + request.respond(track_producer.consume()); - // Append a group and make sure they all get it. - track3.append_group().unwrap(); - track1.assert_group(); - track2.assert_group(); + // The subscriber should now have a consumer. + let mut track1 = track1_handle.await.unwrap(); + track1.assert_no_group(); + + // Dedup: subscribing to the same track should return the existing one. + let mut track1_dup = consumer2.assert_subscribe_track(&Track::new("track1")).await; + track1_dup.assert_is_clone(&track1); - // Make sure that tracks are cancelled when the producer is dropped. - let track4 = consumer.assert_subscribe_track(&Track::new("track2")); - drop(producer); + // Append a group and make sure both get it. + track_producer.append_group().unwrap(); + track1.assert_group(); + track1_dup.assert_group(); - // Make sure the track is errored, not closed. - track4.assert_error(); + // Make sure that pending requests are rejected when dynamic is dropped. + let consumer3 = consumer.clone(); + let track2_handle = tokio::spawn(async move { consumer3.subscribe_track(&Track::new("track2")).await }); + tokio::task::yield_now().await; + drop(dynamic); - let track5 = consumer2.subscribe_track(&Track::new("track3")); - assert!(track5.is_err(), "should have errored"); + let track2_result = track2_handle.await.unwrap(); + assert!(track2_result.is_err(), "should have errored"); } #[tokio::test] async fn stale_producer() { - let mut broadcast = Broadcast::new().produce().dynamic(); + let broadcast = Broadcast::new().produce(); + let mut dynamic = broadcast.dynamic(); let consumer = broadcast.consume(); - // Subscribe to a track, creating a request - let track1 = consumer.assert_subscribe_track(&Track::new("track1")); + // Subscribe to a track. 
+ let consumer_clone = consumer.clone(); + let track1_handle = + tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); + tokio::task::yield_now().await; + + // Handle the request. + let request = dynamic.assert_request(); + let mut producer1 = request.info.clone().produce(); + dynamic.insert_track(&producer1).unwrap(); + request.respond(producer1.consume()); + + let track1 = track1_handle.await.unwrap(); - // Get the requested producer and close it (simulating publisher disconnect) - let mut producer1 = broadcast.assert_request(); + // Close the producer (simulating publisher disconnect). producer1.append_group().unwrap(); producer1.finish().unwrap(); drop(producer1); - // The consumer should see the track as closed + // The consumer should see the track as closed. track1.assert_closed(); - // Subscribe again to the same track - should get a NEW producer, not the stale one - let mut track2 = consumer.assert_subscribe_track(&Track::new("track1")); + // Subscribe again to the same track. + let consumer_clone = consumer.clone(); + let track2_handle = + tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); + tokio::task::yield_now().await; + + // Should be a new request. 
+ let request2 = dynamic.assert_request(); + let mut producer2 = request2.info.clone().produce(); + dynamic.insert_track(&producer2).unwrap(); + request2.respond(producer2.consume()); + + let mut track2 = track2_handle.await.unwrap(); track2.assert_not_closed(); track2.assert_not_clone(&track1); - // There should be a new request for the track - let mut producer2 = broadcast.assert_request(); producer2.append_group().unwrap(); - - // The new consumer should receive the new group track2.assert_group(); } + #[tokio::test] + async fn dedup_pending() { + let broadcast = Broadcast::new().produce(); + let mut dynamic = broadcast.dynamic(); + let consumer = broadcast.consume(); + + // Two concurrent subscribers for the same track. + let c1 = consumer.clone(); + let c2 = consumer.clone(); + let h1 = tokio::spawn(async move { c1.subscribe_track(&Track::new("t")).await.unwrap() }); + let h2 = tokio::spawn(async move { c2.subscribe_track(&Track::new("t")).await.unwrap() }); + tokio::task::yield_now().await; + + // Only one request should be queued. + let request = dynamic.assert_request(); + dynamic.assert_no_request(); + + // Respond. + let producer = request.info.clone().produce(); + dynamic.insert_track(&producer).unwrap(); + request.respond(producer.consume()); + + let t1 = h1.await.unwrap(); + let t2 = h2.await.unwrap(); + t1.assert_is_clone(&t2); + } + #[tokio::test] async fn requested_unused() { tokio::time::pause(); - let mut broadcast = Broadcast::new().produce().dynamic(); + let broadcast = Broadcast::new().produce(); + let mut dynamic = broadcast.dynamic(); + + // Subscribe to a track. 
+ let c1 = broadcast.consume(); + let c1_clone = c1.clone(); + let h1 = tokio::spawn(async move { c1_clone.subscribe_track(&Track::new("unknown_track")).await.unwrap() }); + tokio::task::yield_now().await; - // Subscribe to a track that doesn't exist - this creates a request - let consumer1 = broadcast.consume().assert_subscribe_track(&Track::new("unknown_track")); + // Handle the request. + let request = dynamic.assert_request(); + let producer1 = request.info.clone().produce(); + dynamic.insert_track(&producer1).unwrap(); + request.respond(producer1.consume()); - // Get the requested track producer - let producer1 = broadcast.assert_request(); + let consumer1 = h1.await.unwrap(); - // The track producer should NOT be unused yet because there's a consumer + // The track producer should NOT be unused yet because there's a consumer. assert!( producer1.unused().now_or_never().is_none(), "track producer should be used" ); - // Making a new consumer will keep the producer alive - let consumer2 = broadcast.consume().assert_subscribe_track(&Track::new("unknown_track")); + // Making a new consumer will keep the producer alive. + let consumer2 = c1.assert_subscribe_track(&Track::new("unknown_track")).await; consumer2.assert_is_clone(&consumer1); - // Drop the consumer subscription drop(consumer1); - - // The track producer should NOT be unused yet because there's a consumer assert!( producer1.unused().now_or_never().is_none(), "track producer should be used" ); - // Drop the second consumer, now the producer should be unused + // Drop the second consumer, now the producer should be unused. 
drop(consumer2); - - // BUG: The track producer should become unused after dropping the consumer, - // but it won't because the broadcast keeps a reference in the lookup HashMap - // This assertion will fail, demonstrating the bug assert!( producer1.unused().now_or_never().is_some(), "track producer should be unused after consumer is dropped" @@ -540,12 +685,19 @@ mod test { // Advance paused time to let the async cleanup task run. tokio::time::advance(std::time::Duration::from_millis(1)).await; - // Now the cleanup task should have run and we can subscribe again to the unknown track. - let consumer3 = broadcast.consume().subscribe_track(&Track::new("unknown_track")); - let producer2 = broadcast.assert_request(); + // Now we can subscribe again. + let c2 = broadcast.consume(); + let c2_clone = c2.clone(); + let h2 = tokio::spawn(async move { c2_clone.subscribe_track(&Track::new("unknown_track")).await }); + tokio::task::yield_now().await; + + let request2 = dynamic.assert_request(); + let producer2 = request2.info.clone().produce(); + dynamic.insert_track(&producer2).unwrap(); + request2.respond(producer2.consume()); + let _ = h2.await.unwrap(); - // Drop the consumer, now the producer should be unused - drop(consumer3); + drop(c2); assert!( producer2.unused().now_or_never().is_some(), "track producer should be unused after consumer is dropped" diff --git a/rs/moq-lite/src/model/group.rs b/rs/moq-lite/src/model/group.rs index 504189032..07bc85382 100644 --- a/rs/moq-lite/src/model/group.rs +++ b/rs/moq-lite/src/model/group.rs @@ -201,6 +201,11 @@ impl GroupProducer { .await .map_err(|r| r.abort.clone().unwrap_or(Error::Dropped)) } + + /// Returns true if this group was aborted (closed with an error). 
+ pub fn is_aborted(&self) -> bool { + self.state.read().abort.is_some() + } } impl Clone for GroupProducer { diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index fa954fbb1..c997fb115 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -18,6 +18,7 @@ use super::{Group, GroupConsumer, GroupProducer}; use std::{ collections::{HashSet, VecDeque}, + sync::atomic, task::{Poll, ready}, time::Duration, }; @@ -31,7 +32,12 @@ const MAX_GROUP_AGE: Duration = Duration::from_secs(30); #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Track { pub name: String, + /// Publisher's preferred priority (ceiling for subscriptions). pub priority: u8, + /// Publisher's ordering preference (default false). + pub ordered: bool, + /// Publisher's max cache/latency. Duration::ZERO means unlimited. + pub max_latency: Duration, } impl Track { @@ -39,6 +45,8 @@ impl Track { Self { name: name.into(), priority: 0, + ordered: false, + max_latency: Duration::ZERO, } } @@ -47,6 +55,14 @@ impl Track { } } +/// Per-subscriber preferences for a track subscription. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct Subscription { + pub priority: u8, + pub ordered: bool, + pub max_latency: Duration, +} + #[derive(Default)] struct State { /// Groups in arrival order. `None` entries are tombstones for evicted groups. @@ -56,6 +72,9 @@ struct State { max_sequence: Option, final_sequence: Option, abort: Option, + + /// Per-consumer subscription entries, keyed by auto-incrementing ID. + subscriptions: Vec<(u64, Subscription)>, } impl State { @@ -143,6 +162,54 @@ impl State { } } + /// Compute the aggregate subscription from all active entries, capped by the track info. + /// + /// Returns `None` if there are no subscriptions. 
+ fn max_subscription(&self, cap: &Track) -> Option { + if self.subscriptions.is_empty() { + return None; + } + + let priority = self + .subscriptions + .iter() + .map(|(_, s)| s.priority) + .max() + .unwrap() + .min(cap.priority); + + // ordered is true only if ALL subscribers want ordered AND the publisher allows it. + let ordered = cap.ordered && self.subscriptions.iter().all(|(_, s)| s.ordered); + + // max_latency: max across subscriptions (ZERO = unlimited wins), capped by publisher. + let sub_max = self + .subscriptions + .iter() + .map(|(_, s)| s.max_latency) + .reduce(|a, b| { + if a.is_zero() || b.is_zero() { + Duration::ZERO + } else { + a.max(b) + } + }) + .unwrap(); + + let max_latency = if cap.max_latency.is_zero() { + sub_max + } else if sub_max.is_zero() { + cap.max_latency + } else { + sub_max.min(cap.max_latency) + }; + + Some(Subscription { + priority, + ordered, + max_latency, + }) + } + fn poll_finished(&self) -> Poll> { if let Some(fin) = self.final_sequence { Poll::Ready(Ok(fin)) @@ -169,6 +236,10 @@ impl TrackProducer { } /// Create a new group with the given sequence number. + /// + /// If a group with the same sequence already exists but was aborted (e.g. due to a + /// cancelled subscription), it will be replaced. Successfully completed groups + /// return `Err(Error::Duplicate)`. pub fn create_group(&mut self, info: Group) -> Result { let group = info.produce(); @@ -180,6 +251,22 @@ impl TrackProducer { } if !state.duplicates.insert(group.info.sequence) { + // Sequence exists -- check if the existing group was aborted. + for slot in state.groups.iter_mut() { + if let Some((existing, _)) = slot { + if existing.info.sequence == group.info.sequence { + if !existing.is_aborted() { + return Err(Error::Duplicate); + } + // Replace the aborted group. 
+ let now = tokio::time::Instant::now(); + *slot = Some((group.clone(), now)); + state.evict_expired(now); + return Ok(group); + } + } + } + // In duplicates set but not in the VecDeque -- shouldn't happen. return Err(Error::Duplicate); } @@ -316,6 +403,31 @@ impl TrackProducer { } } + /// Poll for changes to the aggregate subscription. + /// + /// Returns `Ready(sub)` when the aggregate differs from `prev`. + /// Returns `Ready(None)` when no subscriptions are active (or the track is closed). + pub fn poll_max(&self, waiter: &conducer::Waiter, prev: Option<&Subscription>) -> Poll> { + let info = &self.info; + match self.state.poll(waiter, |state| { + let current = state.max_subscription(info); + if current.as_ref() != prev { + Poll::Ready(current) + } else { + Poll::Pending + } + }) { + Poll::Ready(Ok(sub)) => Poll::Ready(sub), + Poll::Ready(Err(_)) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } + + /// Synchronous snapshot of the current aggregate subscription. + pub fn max_subscription(&self) -> Option { + self.state.read().max_subscription(&self.info) + } + fn modify(&self) -> Result> { self.state .write() @@ -383,6 +495,45 @@ impl TrackWeak { } } +/// Tracks a single consumer's subscription preferences within a track. +/// +/// Created via [`TrackConsumer::subscribe`]. Registers this subscription in the +/// shared state on creation; automatically removes it on drop. +/// Does NOT iterate groups -- purely for subscription lifecycle. +pub struct TrackSubscription { + id: u64, + info: Subscription, + state: conducer::Weak, +} + +/// Global counter for subscription IDs. +static NEXT_SUB_ID: atomic::AtomicU64 = atomic::AtomicU64::new(0); + +impl TrackSubscription { + /// Update this subscription's preferences. 
+ pub fn update(&mut self, sub: Subscription) { + if let Ok(mut state) = self.state.write() { + if let Some((_, existing)) = state.subscriptions.iter_mut().find(|(id, _)| *id == self.id) { + *existing = sub.clone(); + } + } + self.info = sub; + } + + /// The current subscription preferences. + pub fn info(&self) -> &Subscription { + &self.info + } +} + +impl Drop for TrackSubscription { + fn drop(&mut self) { + if let Ok(mut state) = self.state.write() { + state.subscriptions.retain(|(id, _)| *id != self.id); + } + } +} + /// A consumer for a track, used to read groups. #[derive(Clone)] pub struct TrackConsumer { @@ -468,6 +619,27 @@ impl TrackConsumer { conducer::wait(|waiter| self.poll_finished(waiter)).await } + /// Register a subscription with the given preferences. + /// + /// The returned [`TrackSubscription`] tracks this consumer's preferences. + /// Dropping it removes the subscription from the aggregate. + /// Cloning a `TrackConsumer` does NOT clone any subscription. + pub fn subscribe(&self, sub: Subscription) -> Result { + let producer = self.state.produce().ok_or(Error::Dropped)?; + let mut state = producer.write().map_err(|_| Error::Dropped)?; + let id = NEXT_SUB_ID.fetch_add(1, atomic::Ordering::Relaxed); + state.subscriptions.push((id, sub.clone())); + drop(state); + let weak = producer.weak(); + drop(producer); + + Ok(TrackSubscription { + id, + info: sub, + state: weak, + }) + } + /// Start the consumer at the specified sequence. pub fn start_at(&mut self, sequence: u64) { self.min_sequence = sequence; @@ -802,4 +974,300 @@ mod test { assert!(matches!(producer.append_group(), Err(Error::BoundsExceeded))); } + + // --- Subscription tests --- + + #[test] + fn subscription_max_aggregation() { + let cap = Track { + name: "test".into(), + priority: 10, + ordered: true, + max_latency: Duration::from_secs(30), + }; + + let mut state = State::default(); + + // No subscriptions → None. 
+ assert_eq!(state.max_subscription(&cap), None); + + // Single subscription. + state.subscriptions.push(( + 0, + Subscription { + priority: 5, + ordered: true, + max_latency: Duration::from_secs(10), + }, + )); + assert_eq!( + state.max_subscription(&cap), + Some(Subscription { + priority: 5, // min(5, 10) = 5 + ordered: true, // all true AND cap true + max_latency: Duration::from_secs(10), // min(10, 30) = 10 + }) + ); + + // Add a second with higher priority, unordered, longer latency. + state.subscriptions.push(( + 1, + Subscription { + priority: 8, + ordered: false, + max_latency: Duration::from_secs(20), + }, + )); + assert_eq!( + state.max_subscription(&cap), + Some(Subscription { + priority: 8, // max(5, 8) = 8, capped by 10 + ordered: false, // not all true + max_latency: Duration::from_secs(20), // max(10, 20) = 20, capped by 30 + }) + ); + } + + #[test] + fn subscription_max_latency_zero_is_unlimited() { + let cap = Track { + name: "test".into(), + priority: 10, + ordered: false, + max_latency: Duration::from_secs(30), + }; + + let mut state = State::default(); + state.subscriptions.push(( + 0, + Subscription { + priority: 5, + ordered: false, + max_latency: Duration::ZERO, // unlimited + }, + )); + state.subscriptions.push(( + 1, + Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }, + )); + + let sub = state.max_subscription(&cap).unwrap(); + // ZERO (unlimited) wins the max, then capped by 30 → 30. 
+ assert_eq!(sub.max_latency, Duration::from_secs(30)); + } + + #[test] + fn subscription_capped_by_track() { + let cap = Track { + name: "test".into(), + priority: 3, + ordered: false, + max_latency: Duration::from_secs(5), + }; + + let mut state = State::default(); + state.subscriptions.push(( + 0, + Subscription { + priority: 10, // exceeds cap + ordered: true, + max_latency: Duration::from_secs(20), // exceeds cap + }, + )); + + let sub = state.max_subscription(&cap).unwrap(); + assert_eq!(sub.priority, 3); // capped + assert!(!sub.ordered); // cap.ordered is false + assert_eq!(sub.max_latency, Duration::from_secs(5)); // capped + } + + #[test] + fn track_subscription_lifecycle() { + let producer = Track { + name: "test".into(), + priority: 10, + ordered: true, + max_latency: Duration::ZERO, // unlimited cap + } + .produce(); + let consumer = producer.consume(); + + assert_eq!(producer.max_subscription(), None); + + // Create a subscription. + let sub1 = consumer + .subscribe(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + .unwrap(); + assert_eq!( + producer.max_subscription(), + Some(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + ); + + // Create a second subscription. + let _sub2 = consumer + .subscribe(Subscription { + priority: 3, + ordered: true, + max_latency: Duration::from_secs(5), + }) + .unwrap(); + assert_eq!( + producer.max_subscription(), + Some(Subscription { + priority: 5, // max(5, 3) + ordered: false, // not all true + max_latency: Duration::from_secs(10), // max(10, 5) + }) + ); + + // Drop first subscription → only second remains. 
+ drop(sub1); + assert_eq!( + producer.max_subscription(), + Some(Subscription { + priority: 3, + ordered: true, + max_latency: Duration::from_secs(5), + }) + ); + } + + #[test] + fn track_subscription_update() { + let producer = Track { + name: "test".into(), + priority: 10, + ordered: true, + max_latency: Duration::ZERO, + } + .produce(); + let consumer = producer.consume(); + + let mut sub = consumer + .subscribe(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + .unwrap(); + + sub.update(Subscription { + priority: 8, + ordered: true, + max_latency: Duration::from_secs(20), + }); + + assert_eq!( + producer.max_subscription(), + Some(Subscription { + priority: 8, + ordered: true, + max_latency: Duration::from_secs(20), + }) + ); + } + + #[test] + fn track_subscription_drop_removes() { + let producer = Track { + name: "test".into(), + priority: 10, + ordered: true, + max_latency: Duration::ZERO, + } + .produce(); + let consumer = producer.consume(); + + let sub = consumer + .subscribe(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + .unwrap(); + assert!(producer.max_subscription().is_some()); + + drop(sub); + assert_eq!(producer.max_subscription(), None); + } + + #[tokio::test] + async fn poll_max_detects_changes() { + let producer = Track { + name: "test".into(), + priority: 10, + ordered: true, + max_latency: Duration::ZERO, + } + .produce(); + let consumer = producer.consume(); + + // Initially no subscriptions → poll_max should return None immediately. + let result = conducer::wait(|waiter| producer.poll_max(waiter, Some(&Subscription::default()))).await; + assert_eq!(result, None); + + // Subscribe → poll_max with prev=None should return the new subscription. 
+ let _sub = consumer + .subscribe(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + .unwrap(); + + let result = conducer::wait(|waiter| producer.poll_max(waiter, None)).await; + assert_eq!( + result, + Some(Subscription { + priority: 5, + ordered: false, + max_latency: Duration::from_secs(10), + }) + ); + } + + #[tokio::test] + async fn create_group_replaces_aborted() { + let mut producer = Track::new("test").produce(); + + // Create and abort a group. + let mut group = producer.create_group(Group { sequence: 5 }).unwrap(); + group.abort(Error::Cancel).unwrap(); + + // Creating the same group again should succeed (replaces aborted). + let group2 = producer.create_group(Group { sequence: 5 }); + assert!(group2.is_ok(), "should replace aborted group"); + + // Creating again on a non-aborted group should fail. + let group3 = producer.create_group(Group { sequence: 5 }); + assert!( + matches!(group3, Err(Error::Duplicate)), + "should not replace active group" + ); + } + + #[tokio::test] + async fn create_group_does_not_replace_finished() { + let mut producer = Track::new("test").produce(); + + // Create and finish a group. + let mut group = producer.create_group(Group { sequence: 5 }).unwrap(); + group.finish().unwrap(); + + // Should fail because the group finished successfully. 
+ let result = producer.create_group(Group { sequence: 5 }); + assert!(matches!(result, Err(Error::Duplicate))); + } } diff --git a/rs/moq-mux/src/catalog.rs b/rs/moq-mux/src/catalog.rs index a99b85a02..37061ddd5 100644 --- a/rs/moq-mux/src/catalog.rs +++ b/rs/moq-mux/src/catalog.rs @@ -32,6 +32,8 @@ impl CatalogProducer { let msf_track = broadcast.create_track(moq_lite::Track { name: moq_msf::DEFAULT_NAME.to_string(), priority: 100, + ordered: false, + max_latency: std::time::Duration::ZERO, })?; Ok(Self { diff --git a/rs/moq-native/examples/chat.rs b/rs/moq-native/examples/chat.rs index c459d2f91..f28cfd279 100644 --- a/rs/moq-native/examples/chat.rs +++ b/rs/moq-native/examples/chat.rs @@ -41,10 +41,7 @@ async fn run_broadcast(origin: moq_lite::OriginProducer) -> anyhow::Result<()> { // Create a track that we'll insert into the broadcast. // A track is a series of groups representing a live stream. - let mut track = broadcast.create_track(moq_lite::Track { - name: "chat".to_string(), - priority: 0, - })?; + let mut track = broadcast.create_track(moq_lite::Track::new("chat"))?; // NOTE: The path is empty because we're using the URL to scope the broadcast. // If you put "alice" here, it would be published as "anon/chat-example/alice". 
diff --git a/rs/moq-native/tests/backend.rs b/rs/moq-native/tests/backend.rs index 39739f469..4324e59f6 100644 --- a/rs/moq-native/tests/backend.rs +++ b/rs/moq-native/tests/backend.rs @@ -71,6 +71,7 @@ async fn backend_test(scheme: &str, backend: moq_native::QuicBackend) { let mut track_sub = bc .subscribe_track(&Track::new("video")) + .await .expect("subscribe_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) @@ -223,6 +224,7 @@ async fn iroh_connect() { let mut track_sub = bc .subscribe_track(&Track::new("video")) + .await .expect("subscribe_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) diff --git a/rs/moq-native/tests/broadcast.rs b/rs/moq-native/tests/broadcast.rs index c1ec6d93e..b11d60111 100644 --- a/rs/moq-native/tests/broadcast.rs +++ b/rs/moq-native/tests/broadcast.rs @@ -89,6 +89,7 @@ async fn broadcast_test(scheme: &str, client_version: Option<&str>, server_versi // Subscribe to the track. let mut track_sub = bc .subscribe_track(&Track::new("video")) + .await .expect("subscribe_track failed"); // Read one group. @@ -429,6 +430,7 @@ async fn broadcast_websocket() { // Subscribe to the track. let mut track_sub = bc .subscribe_track(&Track::new("video")) + .await .expect("subscribe_track failed"); // Read one group. @@ -536,6 +538,7 @@ async fn broadcast_websocket_fallback() { // Subscribe to the track. let mut track_sub = bc .subscribe_track(&Track::new("video")) + .await .expect("subscribe_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) diff --git a/rs/moq-relay/src/web.rs b/rs/moq-relay/src/web.rs index 141ead451..bfd9c3f34 100644 --- a/rs/moq-relay/src/web.rs +++ b/rs/moq-relay/src/web.rs @@ -280,11 +280,13 @@ async fn serve_fetch( let track = moq_lite::Track { name: track, priority: 0, + ordered: false, + max_latency: std::time::Duration::ZERO, }; // NOTE: The auth token is already scoped to the broadcast. 
let broadcast = origin.consume_broadcast("").ok_or(StatusCode::NOT_FOUND)?; - let mut track = broadcast.subscribe_track(&track).map_err(|err| match err { + let mut track = broadcast.subscribe_track(&track).await.map_err(|err| match err { moq_lite::Error::NotFound => StatusCode::NOT_FOUND, _ => StatusCode::INTERNAL_SERVER_ERROR, })?; From 179b83bccc7c9d52c7917480812aa4c33bff9e0d Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Wed, 18 Mar 2026 16:04:00 -0700 Subject: [PATCH 02/14] Simplify Track to just a name, add start/end to Subscription MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Track is now just `{ name: String }` — no priority/ordered/max_latency - Subscription gains `start: Option` and `end: Option` fields - Aggregation: start = min (None wins), end = max (None wins) - No cap/ceiling from producer — aggregation is purely across subscribers - TrackProducer::subscription() is async, blocks until aggregate changes - TrackProducer stores prev_subscription internally (no prev arg needed) - Remove TrackConsumer::start_at() — use Subscription.start instead - Remove subscription tests (will rewrite in follow-up) Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/hang/examples/subscribe.rs | 7 +- rs/hang/examples/video.rs | 7 +- rs/hang/src/catalog/audio/mod.rs | 8 +- rs/hang/src/catalog/root.rs | 7 +- rs/hang/src/catalog/video/mod.rs | 10 +- rs/libmoq/src/consume.rs | 14 +- rs/moq-clock/src/main.rs | 7 +- rs/moq-ffi/src/consumer.rs | 7 +- rs/moq-lite/src/ietf/publisher.rs | 3 +- rs/moq-lite/src/ietf/subscriber.rs | 2 +- rs/moq-lite/src/lite/publisher.rs | 13 +- rs/moq-lite/src/lite/subscriber.rs | 7 +- rs/moq-lite/src/model/track.rs | 388 +++++------------------------ rs/moq-mux/src/catalog.rs | 7 +- rs/moq-relay/src/web.rs | 7 +- 15 files changed, 90 insertions(+), 404 deletions(-) diff --git a/rs/hang/examples/subscribe.rs b/rs/hang/examples/subscribe.rs index ea8c4d6ed..7861df442 100644 --- 
a/rs/hang/examples/subscribe.rs +++ b/rs/hang/examples/subscribe.rs @@ -71,12 +71,7 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result ); // Subscribe to the video track. - let track = moq_lite::Track { - name: name.clone(), - priority: 1, - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let track = moq_lite::Track::new(name.clone()); let track_consumer = broadcast.subscribe_track(&track).await?; let mut ordered = hang::container::OrderedConsumer::new(track_consumer, Duration::from_millis(500)); diff --git a/rs/hang/examples/video.rs b/rs/hang/examples/video.rs index c0c93cfca..a592ea606 100644 --- a/rs/hang/examples/video.rs +++ b/rs/hang/examples/video.rs @@ -40,12 +40,7 @@ async fn run_session(origin: moq_lite::OriginConsumer) -> anyhow::Result<()> { // The catalog can contain multiple tracks, used by the viewer to choose the best track. fn create_track(broadcast: &mut moq_lite::BroadcastProducer) -> anyhow::Result { // Basic information about the video track. 
- let video_track = moq_lite::Track { - name: "video".to_string(), - priority: 1, // Video typically has lower priority than audio - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let video_track = moq_lite::Track::new("video"); // Example video configuration // In a real application, you would get this from the encoder diff --git a/rs/hang/src/catalog/audio/mod.rs b/rs/hang/src/catalog/audio/mod.rs index 1ec2ac1aa..65c7dcef2 100644 --- a/rs/hang/src/catalog/audio/mod.rs +++ b/rs/hang/src/catalog/audio/mod.rs @@ -38,13 +38,7 @@ impl Audio { if let btree_map::Entry::Vacant(entry) = self.renditions.entry(name.clone()) { entry.insert(config.clone()); - // TODO: Remove priority - return moq_lite::Track { - name, - priority: 2, - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + return moq_lite::Track::new(name); } } diff --git a/rs/hang/src/catalog/root.rs b/rs/hang/src/catalog/root.rs index fe13be508..5d8ae5fe9 100644 --- a/rs/hang/src/catalog/root.rs +++ b/rs/hang/src/catalog/root.rs @@ -77,12 +77,7 @@ impl Catalog { } pub fn default_track() -> moq_lite::Track { - moq_lite::Track { - name: Catalog::DEFAULT_NAME.to_string(), - priority: 100, - ordered: false, - max_latency: std::time::Duration::ZERO, - } + moq_lite::Track::new(Catalog::DEFAULT_NAME) } } diff --git a/rs/hang/src/catalog/video/mod.rs b/rs/hang/src/catalog/video/mod.rs index 31dea257b..b293f9a84 100644 --- a/rs/hang/src/catalog/video/mod.rs +++ b/rs/hang/src/catalog/video/mod.rs @@ -58,13 +58,7 @@ impl Video { }; if let btree_map::Entry::Vacant(entry) = self.renditions.entry(name.clone()) { entry.insert(config.clone()); - // TODO: Remove priority - return moq_lite::Track { - name, - priority: 1, - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + return moq_lite::Track::new(name); } } @@ -90,7 +84,7 @@ pub struct Display { /// This struct contains all the information needed to initialize a video decoder, /// including codec-specific parameters, resolution, 
and optional metadata. /// -/// Reference: +/// Reference: #[serde_with::serde_as] #[serde_with::skip_serializing_none] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] diff --git a/rs/libmoq/src/consume.rs b/rs/libmoq/src/consume.rs index fbf877fb4..f7f9b6fcb 100644 --- a/rs/libmoq/src/consume.rs +++ b/rs/libmoq/src/consume.rs @@ -223,12 +223,7 @@ impl Consume { .ok_or(Error::NoIndex)?; let broadcast = consume.broadcast.clone(); - let track_info = moq_lite::Track { - name: rendition.clone(), - priority: 1, // TODO: Remove priority - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let track_info = moq_lite::Track::new(rendition.clone()); let channel = oneshot::channel(); let entry = TaskEntry { @@ -274,12 +269,7 @@ impl Consume { .ok_or(Error::NoIndex)?; let broadcast = consume.broadcast.clone(); - let track_info = moq_lite::Track { - name: rendition.clone(), - priority: 2, // TODO: Remove priority - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let track_info = moq_lite::Track::new(rendition.clone()); let channel = oneshot::channel(); let entry = TaskEntry { diff --git a/rs/moq-clock/src/main.rs b/rs/moq-clock/src/main.rs index 972034b59..548464c61 100644 --- a/rs/moq-clock/src/main.rs +++ b/rs/moq-clock/src/main.rs @@ -53,12 +53,7 @@ async fn main() -> anyhow::Result<()> { tracing::info!(url = ?config.url, "connecting to server"); - let track = Track { - name: config.track, - priority: 0, - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let track = Track::new(config.track); let origin = moq_lite::Origin::produce(); diff --git a/rs/moq-ffi/src/consumer.rs b/rs/moq-ffi/src/consumer.rs index 023162cd8..4bd02eeb1 100644 --- a/rs/moq-ffi/src/consumer.rs +++ b/rs/moq-ffi/src/consumer.rs @@ -87,12 +87,7 @@ impl MoqBroadcastConsumer { /// /// `max_latency_ms` controls the maximum buffering before skipping a GoP. 
pub fn subscribe_media(&self, name: String, max_latency_ms: u64) -> Result, MoqError> { - let track = crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&moq_lite::Track { - name, - priority: 0, - ordered: false, - max_latency: std::time::Duration::ZERO, - }))?; + let track = crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&moq_lite::Track::new(name)))?; let latency = std::time::Duration::from_millis(max_latency_ms); let consumer = hang::container::OrderedConsumer::new(track, latency); Ok(Arc::new(MoqMediaConsumer { diff --git a/rs/moq-lite/src/ietf/publisher.rs b/rs/moq-lite/src/ietf/publisher.rs index e204c089e..05f4c2033 100644 --- a/rs/moq-lite/src/ietf/publisher.rs +++ b/rs/moq-lite/src/ietf/publisher.rs @@ -237,8 +237,7 @@ impl Publisher { flags: Default::default(), }; - tasks - .push(Self::run_group(self.session.clone(), msg, track.info.priority, group, self.version).map(|_| ())); + tasks.push(Self::run_group(self.session.clone(), msg, 0, group, self.version).map(|_| ())); } } diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index 1e4f229f4..688d567cb 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ b/rs/moq-lite/src/ietf/subscriber.rs @@ -637,7 +637,7 @@ impl Subscriber { request_id, track_namespace: broadcast.to_owned(), track_name: (&track.info.name).into(), - subscriber_priority: track.info.priority, + subscriber_priority: 0, group_order: GroupOrder::Descending, filter_type: FilterType::LargestObject, }) diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index e7544b4c9..6efc99a12 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -266,9 +266,9 @@ impl Publisher { let track = broadcast.subscribe_track(&track).await?; let info = lite::SubscribeOk { - priority: track.info.priority, - ordered: track.info.ordered, - max_latency: track.info.max_latency, + priority: 0, + ordered: false, + max_latency: Duration::ZERO, start_group: None, 
end_group: None, }; @@ -293,10 +293,7 @@ impl Publisher { ) -> Result<(), Error> { let mut tasks = FuturesUnordered::new(); - // Start the consumer at the specified sequence, otherwise start at the latest group. - if let Some(start_group) = subscribe.start_group.or_else(|| track.latest()) { - track.start_at(start_group); - } + // No-op: start_at was removed from TrackConsumer loop { let group = tokio::select! { @@ -317,7 +314,7 @@ impl Publisher { sequence, }; - let priority = priority.insert(track.info.priority, sequence); + let priority = priority.insert(subscribe.priority, sequence); tasks.push(Self::serve_group(session.clone(), msg, priority, group, version).map(|_| ())); } } diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index 290b35703..c899b4cd0 100644 --- a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -1,6 +1,7 @@ use std::{ collections::{HashMap, hash_map::Entry}, sync::{Arc, atomic}, + time::Duration, }; use crate::{ @@ -204,9 +205,9 @@ impl Subscriber { id, broadcast: broadcast_path.to_owned(), track: (&track.info.name).into(), - priority: track.info.priority, - ordered: track.info.ordered, - max_latency: track.info.max_latency, + priority: 0, + ordered: false, + max_latency: Duration::ZERO, start_group: None, end_group: None, }; diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index c997fb115..a0ad437ef 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -27,27 +27,16 @@ use std::{ // TODO: Replace with a configurable cache size. const MAX_GROUP_AGE: Duration = Duration::from_secs(30); -/// A track is a collection of groups, delivered out-of-order until expired. -#[derive(Clone, Debug, PartialEq, Eq)] +/// A track is a collection of groups, identified by name. 
+#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Track { pub name: String, - /// Publisher's preferred priority (ceiling for subscriptions). - pub priority: u8, - /// Publisher's ordering preference (default false). - pub ordered: bool, - /// Publisher's max cache/latency. Duration::ZERO means unlimited. - pub max_latency: Duration, } impl Track { pub fn new>(name: T) -> Self { - Self { - name: name.into(), - priority: 0, - ordered: false, - max_latency: Duration::ZERO, - } + Self { name: name.into() } } pub fn produce(self) -> TrackProducer { @@ -55,12 +44,20 @@ impl Track { } } -/// Per-subscriber preferences for a track subscription. +/// Subscription preferences for a subscription or producer cap. +/// +/// Describes how groups should be delivered: priority, ordering, latency bounds, +/// and the range of groups requested. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Subscription { pub priority: u8, pub ordered: bool, + /// Maximum cache/latency. `Duration::ZERO` means unlimited. pub max_latency: Duration, + /// First group sequence to deliver. `None` means start at the latest. + pub start: Option, + /// Last group sequence to deliver. `None` means no end (live). + pub end: Option, } #[derive(Default)] @@ -162,27 +159,21 @@ impl State { } } - /// Compute the aggregate subscription from all active entries, capped by the track info. + /// Compute the aggregate subscription from all active entries. /// /// Returns `None` if there are no subscriptions. 
- fn max_subscription(&self, cap: &Track) -> Option { + fn subscription(&self) -> Option { if self.subscriptions.is_empty() { return None; } - let priority = self - .subscriptions - .iter() - .map(|(_, s)| s.priority) - .max() - .unwrap() - .min(cap.priority); + let priority = self.subscriptions.iter().map(|(_, s)| s.priority).max().unwrap(); - // ordered is true only if ALL subscribers want ordered AND the publisher allows it. - let ordered = cap.ordered && self.subscriptions.iter().all(|(_, s)| s.ordered); + // ordered is true only if ALL subscribers want ordered. + let ordered = self.subscriptions.iter().all(|(_, s)| s.ordered); - // max_latency: max across subscriptions (ZERO = unlimited wins), capped by publisher. - let sub_max = self + // max_latency: max across subscriptions. ZERO = unlimited wins. + let max_latency = self .subscriptions .iter() .map(|(_, s)| s.max_latency) @@ -195,18 +186,34 @@ impl State { }) .unwrap(); - let max_latency = if cap.max_latency.is_zero() { - sub_max - } else if sub_max.is_zero() { - cap.max_latency - } else { - sub_max.min(cap.max_latency) - }; + // start: min across all (earliest requested group). None = latest. + let start = self + .subscriptions + .iter() + .map(|(_, s)| s.start) + .reduce(|a, b| match (a, b) { + (Some(a), Some(b)) => Some(a.min(b)), + _ => None, + }) + .unwrap(); + + // end: max across all (latest / unlimited). None = no end (live). + let end = self + .subscriptions + .iter() + .map(|(_, s)| s.end) + .reduce(|a, b| match (a, b) { + (Some(a), Some(b)) => Some(a.max(b)), + _ => None, + }) + .unwrap(); Some(Subscription { priority, ordered, max_latency, + start, + end, }) } @@ -225,6 +232,8 @@ impl State { pub struct TrackProducer { pub info: Track, state: conducer::Producer, + /// The last aggregate subscription returned by [`Self::subscription`]. 
+ prev_subscription: Option, } impl TrackProducer { @@ -232,6 +241,7 @@ impl TrackProducer { Self { info, state: conducer::Producer::default(), + prev_subscription: None, } } @@ -373,7 +383,6 @@ impl TrackProducer { info: self.info.clone(), state: self.state.consume(), index: 0, - min_sequence: 0, } } @@ -405,27 +414,35 @@ impl TrackProducer { /// Poll for changes to the aggregate subscription. /// - /// Returns `Ready(sub)` when the aggregate differs from `prev`. + /// Returns `Ready(sub)` when the aggregate differs from the last value returned. /// Returns `Ready(None)` when no subscriptions are active (or the track is closed). - pub fn poll_max(&self, waiter: &conducer::Waiter, prev: Option<&Subscription>) -> Poll> { - let info = &self.info; + pub fn poll_subscription(&mut self, waiter: &conducer::Waiter) -> Poll> { + let prev = self.prev_subscription.as_ref(); match self.state.poll(waiter, |state| { - let current = state.max_subscription(info); + let current = state.subscription(); if current.as_ref() != prev { Poll::Ready(current) } else { Poll::Pending } }) { - Poll::Ready(Ok(sub)) => Poll::Ready(sub), - Poll::Ready(Err(_)) => Poll::Ready(None), + Poll::Ready(Ok(sub)) => { + self.prev_subscription = sub.clone(); + Poll::Ready(sub) + } + Poll::Ready(Err(_)) => { + self.prev_subscription = None; + Poll::Ready(None) + } Poll::Pending => Poll::Pending, } } - /// Synchronous snapshot of the current aggregate subscription. - pub fn max_subscription(&self) -> Option { - self.state.read().max_subscription(&self.info) + /// Block until the aggregate subscription changes. + /// + /// Returns `None` when all subscriptions are dropped or the track is closed. 
+ pub async fn subscription(&mut self) -> Option { + conducer::wait(|waiter| self.poll_subscription(waiter)).await } fn modify(&self) -> Result> { @@ -440,13 +457,14 @@ impl Clone for TrackProducer { Self { info: self.info.clone(), state: self.state.clone(), + prev_subscription: self.prev_subscription.clone(), } } } -impl From for TrackProducer { - fn from(info: Track) -> Self { - TrackProducer::new(info) +impl> From for TrackProducer { + fn from(info: T) -> Self { + TrackProducer::new(info.into()) } } @@ -479,7 +497,6 @@ impl TrackWeak { info: self.info.clone(), state: self.state.consume(), index: 0, - min_sequence: 0, } } @@ -540,8 +557,6 @@ pub struct TrackConsumer { pub info: Track, state: conducer::Consumer, index: usize, - - min_sequence: u64, } impl TrackConsumer { @@ -564,8 +579,7 @@ impl TrackConsumer { /// `Poll::Ready(Some(Err(e)))` when the track has been aborted, or /// `Poll::Pending` when no group is available yet. pub fn poll_next_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { - let Some((consumer, found_index)) = - ready!(self.poll(waiter, |state| state.poll_next_group(self.index, self.min_sequence))?) + let Some((consumer, found_index)) = ready!(self.poll(waiter, |state| state.poll_next_group(self.index, 0))?) else { return Poll::Ready(Ok(None)); }; @@ -640,11 +654,6 @@ impl TrackConsumer { }) } - /// Start the consumer at the specified sequence. - pub fn start_at(&mut self, sequence: u64) { - self.min_sequence = sequence; - } - /// Return the latest sequence number in the track. pub fn latest(&self) -> Option { self.state.read().max_sequence @@ -975,269 +984,6 @@ mod test { assert!(matches!(producer.append_group(), Err(Error::BoundsExceeded))); } - // --- Subscription tests --- - - #[test] - fn subscription_max_aggregation() { - let cap = Track { - name: "test".into(), - priority: 10, - ordered: true, - max_latency: Duration::from_secs(30), - }; - - let mut state = State::default(); - - // No subscriptions → None. 
- assert_eq!(state.max_subscription(&cap), None); - - // Single subscription. - state.subscriptions.push(( - 0, - Subscription { - priority: 5, - ordered: true, - max_latency: Duration::from_secs(10), - }, - )); - assert_eq!( - state.max_subscription(&cap), - Some(Subscription { - priority: 5, // min(5, 10) = 5 - ordered: true, // all true AND cap true - max_latency: Duration::from_secs(10), // min(10, 30) = 10 - }) - ); - - // Add a second with higher priority, unordered, longer latency. - state.subscriptions.push(( - 1, - Subscription { - priority: 8, - ordered: false, - max_latency: Duration::from_secs(20), - }, - )); - assert_eq!( - state.max_subscription(&cap), - Some(Subscription { - priority: 8, // max(5, 8) = 8, capped by 10 - ordered: false, // not all true - max_latency: Duration::from_secs(20), // max(10, 20) = 20, capped by 30 - }) - ); - } - - #[test] - fn subscription_max_latency_zero_is_unlimited() { - let cap = Track { - name: "test".into(), - priority: 10, - ordered: false, - max_latency: Duration::from_secs(30), - }; - - let mut state = State::default(); - state.subscriptions.push(( - 0, - Subscription { - priority: 5, - ordered: false, - max_latency: Duration::ZERO, // unlimited - }, - )); - state.subscriptions.push(( - 1, - Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }, - )); - - let sub = state.max_subscription(&cap).unwrap(); - // ZERO (unlimited) wins the max, then capped by 30 → 30. 
- assert_eq!(sub.max_latency, Duration::from_secs(30)); - } - - #[test] - fn subscription_capped_by_track() { - let cap = Track { - name: "test".into(), - priority: 3, - ordered: false, - max_latency: Duration::from_secs(5), - }; - - let mut state = State::default(); - state.subscriptions.push(( - 0, - Subscription { - priority: 10, // exceeds cap - ordered: true, - max_latency: Duration::from_secs(20), // exceeds cap - }, - )); - - let sub = state.max_subscription(&cap).unwrap(); - assert_eq!(sub.priority, 3); // capped - assert!(!sub.ordered); // cap.ordered is false - assert_eq!(sub.max_latency, Duration::from_secs(5)); // capped - } - - #[test] - fn track_subscription_lifecycle() { - let producer = Track { - name: "test".into(), - priority: 10, - ordered: true, - max_latency: Duration::ZERO, // unlimited cap - } - .produce(); - let consumer = producer.consume(); - - assert_eq!(producer.max_subscription(), None); - - // Create a subscription. - let sub1 = consumer - .subscribe(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - .unwrap(); - assert_eq!( - producer.max_subscription(), - Some(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - ); - - // Create a second subscription. - let _sub2 = consumer - .subscribe(Subscription { - priority: 3, - ordered: true, - max_latency: Duration::from_secs(5), - }) - .unwrap(); - assert_eq!( - producer.max_subscription(), - Some(Subscription { - priority: 5, // max(5, 3) - ordered: false, // not all true - max_latency: Duration::from_secs(10), // max(10, 5) - }) - ); - - // Drop first subscription → only second remains. 
- drop(sub1); - assert_eq!( - producer.max_subscription(), - Some(Subscription { - priority: 3, - ordered: true, - max_latency: Duration::from_secs(5), - }) - ); - } - - #[test] - fn track_subscription_update() { - let producer = Track { - name: "test".into(), - priority: 10, - ordered: true, - max_latency: Duration::ZERO, - } - .produce(); - let consumer = producer.consume(); - - let mut sub = consumer - .subscribe(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - .unwrap(); - - sub.update(Subscription { - priority: 8, - ordered: true, - max_latency: Duration::from_secs(20), - }); - - assert_eq!( - producer.max_subscription(), - Some(Subscription { - priority: 8, - ordered: true, - max_latency: Duration::from_secs(20), - }) - ); - } - - #[test] - fn track_subscription_drop_removes() { - let producer = Track { - name: "test".into(), - priority: 10, - ordered: true, - max_latency: Duration::ZERO, - } - .produce(); - let consumer = producer.consume(); - - let sub = consumer - .subscribe(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - .unwrap(); - assert!(producer.max_subscription().is_some()); - - drop(sub); - assert_eq!(producer.max_subscription(), None); - } - - #[tokio::test] - async fn poll_max_detects_changes() { - let producer = Track { - name: "test".into(), - priority: 10, - ordered: true, - max_latency: Duration::ZERO, - } - .produce(); - let consumer = producer.consume(); - - // Initially no subscriptions → poll_max should return None immediately. - let result = conducer::wait(|waiter| producer.poll_max(waiter, Some(&Subscription::default()))).await; - assert_eq!(result, None); - - // Subscribe → poll_max with prev=None should return the new subscription. 
- let _sub = consumer - .subscribe(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - .unwrap(); - - let result = conducer::wait(|waiter| producer.poll_max(waiter, None)).await; - assert_eq!( - result, - Some(Subscription { - priority: 5, - ordered: false, - max_latency: Duration::from_secs(10), - }) - ); - } - #[tokio::test] async fn create_group_replaces_aborted() { let mut producer = Track::new("test").produce(); diff --git a/rs/moq-mux/src/catalog.rs b/rs/moq-mux/src/catalog.rs index 37061ddd5..a0dbd1dc9 100644 --- a/rs/moq-mux/src/catalog.rs +++ b/rs/moq-mux/src/catalog.rs @@ -29,12 +29,7 @@ impl CatalogProducer { catalog: hang::Catalog, ) -> Result { let hang_track = broadcast.create_track(hang::Catalog::default_track())?; - let msf_track = broadcast.create_track(moq_lite::Track { - name: moq_msf::DEFAULT_NAME.to_string(), - priority: 100, - ordered: false, - max_latency: std::time::Duration::ZERO, - })?; + let msf_track = broadcast.create_track(moq_lite::Track::new(moq_msf::DEFAULT_NAME))?; Ok(Self { hang_track, diff --git a/rs/moq-relay/src/web.rs b/rs/moq-relay/src/web.rs index bfd9c3f34..86509692e 100644 --- a/rs/moq-relay/src/web.rs +++ b/rs/moq-relay/src/web.rs @@ -277,12 +277,7 @@ async fn serve_fetch( tracing::info!(%broadcast, %track, "fetching track"); - let track = moq_lite::Track { - name: track, - priority: 0, - ordered: false, - max_latency: std::time::Duration::ZERO, - }; + let track = moq_lite::Track::new(track); // NOTE: The auth token is already scoped to the broadcast. 
let broadcast = origin.consume_broadcast("").ok_or(StatusCode::NOT_FOUND)?; From 5ab015d81c5bedfcc59b4e4b13e5795db024e8b5 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 09:24:11 -0700 Subject: [PATCH 03/14] Inline subscribe_track into run_catalog and run_track Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/libmoq/src/consume.rs | 41 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/rs/libmoq/src/consume.rs b/rs/libmoq/src/consume.rs index f7f9b6fcb..59ce6ca32 100644 --- a/rs/libmoq/src/consume.rs +++ b/rs/libmoq/src/consume.rs @@ -57,14 +57,8 @@ impl Consume { let id = self.catalog_task.insert(Some(entry))?; tokio::spawn(async move { - let res = async { - let catalog = broadcast - .subscribe_track(&hang::catalog::Catalog::default_track()) - .await?; - Self::run_catalog(id, broadcast, catalog.into()).await - }; let res = tokio::select! { - res = res => res, + res = Self::run_catalog(id, broadcast) => res, _ = channel.1 => Ok(()), }; @@ -77,11 +71,11 @@ impl Consume { Ok(id) } - async fn run_catalog( - task_id: Id, - broadcast: moq_lite::BroadcastConsumer, - mut catalog: hang::CatalogConsumer, - ) -> Result<(), Error> { + async fn run_catalog(task_id: Id, broadcast: moq_lite::BroadcastConsumer) -> Result<(), Error> { + let track = broadcast + .subscribe_track(&hang::catalog::Catalog::default_track()) + .await?; + let mut catalog = hang::CatalogConsumer::from(track); while let Some(catalog) = catalog.next().await? { // Unfortunately we need to store the codec information on the heap. let audio_codec = catalog @@ -233,13 +227,8 @@ impl Consume { let id = self.track_task.insert(Some(entry))?; tokio::spawn(async move { - let res = async { - let track = broadcast.subscribe_track(&track_info).await?; - let track = hang::container::OrderedConsumer::new(track, latency); - Self::run_track(id, track).await - }; let res = tokio::select! 
{ - res = res => res, + res = Self::run_track(id, broadcast, &track_info, latency) => res, _ = channel.1 => Ok(()), }; @@ -279,13 +268,8 @@ impl Consume { let id = self.track_task.insert(Some(entry))?; tokio::spawn(async move { - let res = async { - let track = broadcast.subscribe_track(&track_info).await?; - let track = hang::container::OrderedConsumer::new(track, latency); - Self::run_track(id, track).await - }; let res = tokio::select! { - res = res => res, + res = Self::run_track(id, broadcast, &track_info, latency) => res, _ = channel.1 => Ok(()), }; @@ -298,7 +282,14 @@ impl Consume { Ok(id) } - async fn run_track(task_id: Id, mut track: hang::container::OrderedConsumer) -> Result<(), Error> { + async fn run_track( + task_id: Id, + broadcast: moq_lite::BroadcastConsumer, + track_info: &moq_lite::Track, + latency: std::time::Duration, + ) -> Result<(), Error> { + let track = broadcast.subscribe_track(track_info).await?; + let mut track = hang::container::OrderedConsumer::new(track, latency); while let Some(mut ordered) = track.read().await? { // TODO add a chunking API so we don't have to (potentially) allocate a contiguous buffer for the frame. 
let mut new_payload = hang::container::BufList::new(); From 8d37b5de44a7acc88ba45266e478f0f35b94661b Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 12:28:22 -0700 Subject: [PATCH 04/14] Revert async broadcast, redesign subscription API - Revert broadcast.rs to sync: consume_track() looks up or creates tracks synchronously, requested_track() returns TrackProducer directly - Remove TrackRequest and oneshot channel machinery - Replace TrackSubscription with TrackSubscriber using conducer-based subscriptions for two-level poll aggregation - TrackConsumer::subscribe() is async, blocks until first group exists - TrackSubscriber::recv_group() respects start/end range - Fix create_group aborted replacement: tombstone old entry, push new - Update all callers to use consume_track (sync) or subscribe_track - Fix moq-ffi to remove RUNTIME.block_on() now that consume_track is sync Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/hang/examples/subscribe.rs | 4 +- rs/libmoq/src/consume.rs | 6 +- rs/moq-clock/src/main.rs | 2 +- rs/moq-ffi/src/consumer.rs | 5 +- rs/moq-lite/src/ietf/publisher.rs | 2 +- rs/moq-lite/src/ietf/subscriber.rs | 25 +- rs/moq-lite/src/lite/publisher.rs | 2 +- rs/moq-lite/src/lite/subscriber.rs | 30 +-- rs/moq-lite/src/model/broadcast.rs | 372 +++++++++++------------------ rs/moq-lite/src/model/track.rs | 199 ++++++++++----- rs/moq-native/tests/backend.rs | 10 +- rs/moq-native/tests/broadcast.rs | 15 +- rs/moq-relay/src/web.rs | 2 +- 13 files changed, 307 insertions(+), 367 deletions(-) diff --git a/rs/hang/examples/subscribe.rs b/rs/hang/examples/subscribe.rs index 7861df442..e83020dea 100644 --- a/rs/hang/examples/subscribe.rs +++ b/rs/hang/examples/subscribe.rs @@ -49,7 +49,7 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result tracing::info!(%path, "broadcast announced"); // Read the catalog to discover available tracks. 
- let catalog_track = broadcast.subscribe_track(&hang::Catalog::default_track()).await?; + let catalog_track = broadcast.consume_track(&hang::Catalog::default_track())?; let mut catalog = hang::CatalogConsumer::new(catalog_track); let info = catalog.next().await?.ok_or_else(|| anyhow::anyhow!("no catalog"))?; @@ -73,7 +73,7 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result // Subscribe to the video track. let track = moq_lite::Track::new(name.clone()); - let track_consumer = broadcast.subscribe_track(&track).await?; + let track_consumer = broadcast.consume_track(&track)?; let mut ordered = hang::container::OrderedConsumer::new(track_consumer, Duration::from_millis(500)); // Read frames in presentation order. diff --git a/rs/libmoq/src/consume.rs b/rs/libmoq/src/consume.rs index 59ce6ca32..62f4676e4 100644 --- a/rs/libmoq/src/consume.rs +++ b/rs/libmoq/src/consume.rs @@ -72,9 +72,7 @@ impl Consume { } async fn run_catalog(task_id: Id, broadcast: moq_lite::BroadcastConsumer) -> Result<(), Error> { - let track = broadcast - .subscribe_track(&hang::catalog::Catalog::default_track()) - .await?; + let track = broadcast.consume_track(&hang::catalog::Catalog::default_track())?; let mut catalog = hang::CatalogConsumer::from(track); while let Some(catalog) = catalog.next().await? { // Unfortunately we need to store the codec information on the heap. @@ -288,7 +286,7 @@ impl Consume { track_info: &moq_lite::Track, latency: std::time::Duration, ) -> Result<(), Error> { - let track = broadcast.subscribe_track(track_info).await?; + let track = broadcast.consume_track(track_info)?; let mut track = hang::container::OrderedConsumer::new(track, latency); while let Some(mut ordered) = track.read().await? { // TODO add a chunking API so we don't have to (potentially) allocate a contiguous buffer for the frame. 
diff --git a/rs/moq-clock/src/main.rs b/rs/moq-clock/src/main.rs index 548464c61..ecb1ed28b 100644 --- a/rs/moq-clock/src/main.rs +++ b/rs/moq-clock/src/main.rs @@ -94,7 +94,7 @@ async fn main() -> anyhow::Result<()> { Some(announce) = origin.announced() => match announce { (path, Some(broadcast)) => { tracing::info!(broadcast = %path, "broadcast is online, subscribing to track"); - let track = broadcast.subscribe_track(&track).await?; + let track = broadcast.consume_track(&track)?; clock = Some(clock::Subscriber::new(track)); } (path, None) => { diff --git a/rs/moq-ffi/src/consumer.rs b/rs/moq-ffi/src/consumer.rs index 4bd02eeb1..6d1f1c6bc 100644 --- a/rs/moq-ffi/src/consumer.rs +++ b/rs/moq-ffi/src/consumer.rs @@ -75,8 +75,7 @@ impl Media { impl MoqBroadcastConsumer { /// Subscribe to the catalog for this broadcast. pub fn subscribe_catalog(&self) -> Result, MoqError> { - let track = - crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&hang::catalog::Catalog::default_track()))?; + let track = self.inner.consume_track(&hang::catalog::Catalog::default_track())?; let consumer = hang::CatalogConsumer::from(track); Ok(Arc::new(MoqCatalogConsumer { task: Task::new(Catalog { inner: consumer }), @@ -87,7 +86,7 @@ impl MoqBroadcastConsumer { /// /// `max_latency_ms` controls the maximum buffering before skipping a GoP. 
pub fn subscribe_media(&self, name: String, max_latency_ms: u64) -> Result, MoqError> { - let track = crate::ffi::RUNTIME.block_on(self.inner.subscribe_track(&moq_lite::Track::new(name)))?; + let track = self.inner.consume_track(&moq_lite::Track::new(name))?; let latency = std::time::Duration::from_millis(max_latency_ms); let consumer = hang::container::OrderedConsumer::new(track, latency); Ok(Arc::new(MoqMediaConsumer { diff --git a/rs/moq-lite/src/ietf/publisher.rs b/rs/moq-lite/src/ietf/publisher.rs index 05f4c2033..3433a9d49 100644 --- a/rs/moq-lite/src/ietf/publisher.rs +++ b/rs/moq-lite/src/ietf/publisher.rs @@ -113,7 +113,7 @@ impl Publisher { let track = Track::new(msg.track_name.to_string()); - let track = match broadcast.subscribe_track(&track).await { + let track = match broadcast.consume_track(&track) { Ok(track) => track, Err(err) => { self.write_subscribe_error(&mut stream.writer, request_id, 404, &err.to_string()) diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index 688d567cb..aa21f1a66 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ b/rs/moq-lite/src/ietf/subscriber.rs @@ -496,9 +496,9 @@ impl Subscriber { async fn run_broadcast(&self, path: Path<'_>, mut broadcast: BroadcastDynamic) -> Result<(), Error> { loop { - let request = tokio::select! { - request = broadcast.requested_track() => match request { - Ok(request) => request, + let mut track = tokio::select! 
{ + track = broadcast.requested_track() => match track { + Ok(track) => track, Err(err) => { tracing::debug!(%err, "broadcast closed"); break; @@ -510,28 +510,15 @@ impl Subscriber { let mut this = self.clone(); let path = path.to_owned(); - let broadcast = broadcast.clone(); web_async::spawn(async move { - this.run_subscribe(path, request, broadcast).await; + this.run_subscribe(path, &mut track).await; }); } Ok(()) } - async fn run_subscribe( - &mut self, - broadcast_path: Path<'_>, - request: crate::TrackRequest, - broadcast: BroadcastDynamic, - ) { - // Create the TrackProducer and insert into the broadcast lookup. - let mut track = request.info.clone().produce(); - if let Err(err) = broadcast.insert_track(&track) { - request.reject(err); - return; - } - request.respond(track.consume()); + async fn run_subscribe(&mut self, broadcast_path: Path<'_>, track: &mut TrackProducer) { let broadcast = broadcast_path; let request_id = match self.control.next_request_id().await { Ok(id) => id, @@ -565,7 +552,7 @@ impl Subscriber { } // Write Subscribe message - if let Err(err) = self.write_subscribe(&mut stream, request_id, &broadcast, &track).await { + if let Err(err) = self.write_subscribe(&mut stream, request_id, &broadcast, track).await { tracing::debug!(%err, "failed to write subscribe"); self.state.lock().subscribes.remove(&request_id); let _ = track.abort(err); diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index 6efc99a12..ec043d5e3 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -263,7 +263,7 @@ impl Publisher { let track = Track::new(subscribe.track.to_string()); let broadcast = consumer.ok_or(Error::NotFound)?; - let track = broadcast.subscribe_track(&track).await?; + let track = broadcast.consume_track(&track)?; let info = lite::SubscribeOk { priority: 0, diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index c899b4cd0..40af523d6 100644 --- 
a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -156,9 +156,9 @@ impl Subscriber { loop { // Keep serving requests until there are no more consumers. // This way we'll clean up the task when the broadcast is no longer needed. - let request = tokio::select! { - request = broadcast.requested_track() => match request { - Ok(request) => request, + let mut track = tokio::select! { + track = broadcast.requested_track() => match track { + Ok(track) => track, Err(err) => { tracing::debug!(%err, "broadcast closed"); break; @@ -171,33 +171,15 @@ impl Subscriber { let mut this = self.clone(); let path = path.clone(); - let broadcast = broadcast.clone(); web_async::spawn(async move { - this.run_subscribe(id, path, request, broadcast).await; + this.run_subscribe(id, path, &mut track).await; this.subscribes.lock().remove(&id); }); } } - async fn run_subscribe( - &mut self, - id: u64, - broadcast_path: Path<'_>, - request: crate::TrackRequest, - broadcast: BroadcastDynamic, - ) { - let track_name = request.info.name.clone(); - - // Create the TrackProducer and insert into the broadcast lookup. - let mut track = request.info.clone().produce(); - if let Err(err) = broadcast.insert_track(&track) { - tracing::warn!(%err, track = %track_name, "failed to insert track"); - request.reject(err); - return; - } - - // Respond to the waiting subscribers with the consumer. 
- request.respond(track.consume()); + async fn run_subscribe(&mut self, id: u64, broadcast_path: Path<'_>, track: &mut TrackProducer) { + let track_name = track.info.name.clone(); self.subscribes.lock().insert(id, track.clone()); diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 979f055cc..087450da2 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -5,7 +5,7 @@ use std::{ use std::ops::Deref; -use crate::{Error, TrackConsumer, TrackProducer, model::track::TrackWeak}; +use crate::{Error, Subscription, TrackConsumer, TrackProducer, TrackSubscriber, model::track::TrackWeak}; use super::Track; @@ -34,71 +34,22 @@ impl Broadcast { } } -/// A pending track request queued by [`BroadcastConsumer::subscribe_track`]. -/// -/// The dynamic handler receives this via [`BroadcastDynamic::requested_track`], -/// creates the track, inserts it, and responds. -pub struct TrackRequest { - pub info: Track, - replies: Vec>>, -} - -impl TrackRequest { - /// Respond to all waiting subscribers with the given consumer. - pub fn respond(mut self, consumer: TrackConsumer) { - for tx in self.replies.drain(..) { - let _ = tx.send(Ok(consumer.clone())); - } - } - - /// Reject all waiting subscribers with the given error. - pub fn reject(mut self, err: Error) { - for tx in self.replies.drain(..) { - let _ = tx.send(Err(err.clone())); - } - } -} - -impl Drop for TrackRequest { - fn drop(&mut self) { - // If the request is dropped without responding, reject with Cancel. - for tx in self.replies.drain(..) { - let _ = tx.send(Err(Error::Cancel)); - } - } -} - #[derive(Default, Clone)] struct State { // Weak references for deduplication. Doesn't prevent track auto-close. tracks: HashMap, - // Dynamic tracks that have been requested but not yet handled. - requests: Vec, + // Track producers queued by consume_track for the dynamic handler. + requested: Vec, // The current number of dynamic producers. 
- // If this is 0, requests must be empty. + // If this is 0, requests will fail with NotFound. dynamic: usize, // The error that caused the broadcast to be aborted, if any. abort: Option, } -// TrackRequest contains oneshot senders which aren't Clone, but State derives Clone. -// We need a manual Clone that drops the requests (they can't be cloned). -// Actually, this is already handled because State's Clone impl will try to clone requests. -// Let me remove the derive and implement Clone manually. -impl Clone for TrackRequest { - fn clone(&self) -> Self { - // Requests can't actually be meaningfully cloned; the senders are consumed. - // This is only needed because State derives Clone (for conducer). - Self { - info: self.info.clone(), - replies: Vec::new(), - } - } -} - fn modify(state: &conducer::Producer) -> Result, Error> { match state.write() { Ok(state) => Ok(state), @@ -173,9 +124,9 @@ impl BroadcastProducer { weak.abort(err.clone()); } - // Reject any pending dynamic track requests. - for request in guard.requests.drain(..) { - request.reject(err.clone()); + // Abort any pending requested track producers. + for mut track in guard.requested.drain(..) { + let _ = track.abort(err.clone()); } guard.abort = Some(err); @@ -250,9 +201,9 @@ fn insert_track_impl(state: &conducer::Producer, track: &TrackProducer) - /// Handles on-demand track creation for a broadcast. /// -/// When a consumer requests a track that doesn't exist, a [`TrackRequest`] is queued -/// for the dynamic producer to fulfill via [`Self::requested_track`]. -/// Dropped when no longer needed; pending requests are automatically rejected. +/// When a consumer requests a track that doesn't exist, a [`TrackProducer`] is +/// created and queued. The dynamic handler receives it via [`Self::requested_track`] +/// and starts filling it with data. 
#[derive(Clone)] pub struct BroadcastDynamic { info: Broadcast, @@ -262,7 +213,6 @@ pub struct BroadcastDynamic { impl BroadcastDynamic { fn new(info: Broadcast, state: conducer::Producer) -> Self { if let Ok(mut state) = state.write() { - // If the broadcast is already closed, we can't handle any new requests. state.dynamic += 1; } @@ -280,25 +230,22 @@ impl BroadcastDynamic { }) } - pub fn poll_requested_track(&mut self, waiter: &conducer::Waiter) -> Poll> { - self.poll(waiter, |state| match state.requests.pop() { - Some(request) => Poll::Ready(request), + pub fn poll_requested_track(&mut self, waiter: &conducer::Waiter) -> Poll> { + self.poll(waiter, |state| match state.requested.pop() { + Some(producer) => Poll::Ready(producer), None => Poll::Pending, }) } - /// Block until a consumer requests a track, returning the request. + /// Block until a consumer requests a track, returning the producer. /// - /// The handler should create a [`TrackProducer`], insert it via [`Self::insert_track`], - /// and then call [`TrackRequest::respond`] with the consumer. - pub async fn requested_track(&mut self) -> Result { + /// The handler should start filling the [`TrackProducer`] with data + /// (e.g., by subscribing upstream). + pub async fn requested_track(&mut self) -> Result { conducer::wait(|waiter| self.poll_requested_track(waiter)).await } /// Insert a track into the broadcast lookup. - /// - /// Call this before responding to a [`TrackRequest`] so that subsequent - /// [`BroadcastConsumer::subscribe_track`] calls find the track immediately. pub fn insert_track(&self, track: &TrackProducer) -> Result<(), Error> { insert_track_impl(&self.state, track) } @@ -320,9 +267,9 @@ impl BroadcastDynamic { weak.abort(err.clone()); } - // Reject any pending dynamic track requests. - for request in guard.requests.drain(..) { - request.reject(err.clone()); + // Abort any pending requested track producers. + for mut track in guard.requested.drain(..) 
{ + let _ = track.abort(err.clone()); } guard.abort = Some(err); @@ -345,9 +292,9 @@ impl Drop for BroadcastDynamic { return; } - // Reject all pending requests since there's no dynamic producer to handle them. - for request in state.requests.drain(..) { - request.reject(Error::Cancel); + // Abort all pending requested tracks since there's no dynamic producer to handle them. + for mut track in state.requested.drain(..) { + let _ = track.abort(Error::Cancel); } } } @@ -358,7 +305,7 @@ use futures::FutureExt; #[cfg(test)] impl BroadcastDynamic { - pub fn assert_request(&mut self) -> TrackRequest { + pub fn assert_request(&mut self) -> TrackProducer { self.requested_track() .now_or_never() .expect("should not have blocked") @@ -386,48 +333,77 @@ impl Deref for BroadcastConsumer { } impl BroadcastConsumer { - /// Subscribe to a track by name. + /// Look up a track by name, returning its consumer if active. /// - /// If the track already exists in the lookup and is still active, returns its consumer - /// immediately. Otherwise, queues a [`TrackRequest`] for the dynamic handler and awaits - /// its response. - pub async fn subscribe_track(&self, track: &Track) -> Result { - let rx = { - // Upgrade to a temporary producer so we can modify the state. - let producer = self - .state - .produce() - .ok_or_else(|| self.state.read().abort.clone().unwrap_or(Error::Dropped))?; - let mut state = modify(&producer)?; - - if let Some(weak) = state.tracks.get(&track.name) { - if !weak.is_closed() { - return Ok(weak.consume()); - } - // Remove the stale entry - state.tracks.remove(&track.name); + /// If the track doesn't exist and a dynamic handler is registered, + /// a [`TrackProducer`] is created, inserted, and queued for the handler. + /// The returned [`TrackConsumer`] is connected to that producer. 
+ pub fn consume_track(&self, track: &Track) -> Result { + let producer = self + .state + .produce() + .ok_or_else(|| self.state.read().abort.clone().unwrap_or(Error::Dropped))?; + let mut state = modify(&producer)?; + + if let Some(weak) = state.tracks.get(&track.name) { + if !weak.is_closed() { + return Ok(weak.consume()); } + // Remove the stale entry + state.tracks.remove(&track.name); + } + + if state.dynamic == 0 { + return Err(Error::NotFound); + } + + // Create a new TrackProducer, insert into lookup, and queue for dynamic handler. + let track_producer = TrackProducer::new(track.clone()); - if state.dynamic == 0 { - return Err(Error::NotFound); + // Insert into the lookup so subsequent consume_track calls find it. + match state.tracks.entry(track.name.clone()) { + hash_map::Entry::Occupied(mut entry) => { + entry.insert(track_producer.weak()); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(track_producer.weak()); } + } - let (tx, rx) = tokio::sync::oneshot::channel(); + // Spawn cleanup task. + let weak = track_producer.weak(); + let consumer_state = producer.consume(); + web_async::spawn(async move { + let _ = weak.unused().await; - // Dedup: if there's already a pending request for this track, piggyback. - if let Some(existing) = state.requests.iter_mut().find(|r| r.info.name == track.name) { - existing.replies.push(tx); - } else { - state.requests.push(TrackRequest { - info: track.clone(), - replies: vec![tx], - }); + let Some(p) = consumer_state.produce() else { + return; + }; + let Ok(mut s) = p.write() else { + return; + }; + + if let Some(current) = s.tracks.remove(&weak.info.name) + && !current.is_clone(&weak) + { + s.tracks.insert(current.info.name.clone(), current); } + }); - rx - }; + let consumer = track_producer.consume(); - rx.await.map_err(|_| Error::Dropped)? + // Queue the producer for the dynamic handler. 
+ state.requested.push(track_producer); + + Ok(consumer) + } + + /// Subscribe to a track, blocking until the first group exists (or finish/abort). + /// + /// Convenience: calls [`Self::consume_track`] then [`TrackConsumer::subscribe`]. + pub async fn subscribe_track(&self, track: &Track, sub: Subscription) -> Result { + let consumer = self.consume_track(track)?; + consumer.subscribe(sub).await } pub async fn closed(&self) -> Error { @@ -443,8 +419,8 @@ impl BroadcastConsumer { #[cfg(test)] impl BroadcastConsumer { - pub async fn assert_subscribe_track(&self, track: &Track) -> TrackConsumer { - self.subscribe_track(track).await.expect("should not have errored") + pub fn assert_consume_track(&self, track: &Track) -> TrackConsumer { + self.consume_track(track).expect("should not have errored") } pub fn assert_not_closed(&self) { @@ -471,14 +447,14 @@ mod test { let consumer = producer.consume(); - let mut track1_sub = consumer.assert_subscribe_track(&Track::new("track1")).await; - track1_sub.assert_group(); + let mut track1_consumer = consumer.assert_consume_track(&Track::new("track1")); + track1_consumer.assert_group(); let mut track2 = Track::new("track2").produce(); producer.assert_insert_track(&track2); let consumer2 = producer.consume(); - let mut track2_consumer = consumer2.assert_subscribe_track(&Track::new("track2")).await; + let mut track2_consumer = consumer2.assert_consume_track(&Track::new("track2")); track2_consumer.assert_no_group(); track2.append_group().unwrap(); @@ -496,11 +472,7 @@ mod test { // Create a new track and insert it into the broadcast. let track1 = producer.assert_create_track(&Track::new("track1")); - let track1c = consumer.assert_subscribe_track(&track1.info).await; - - // Subscribe to a dynamic track -- this will pend until the handler responds. 
- let consumer2 = consumer.clone(); - let track2_handle = tokio::spawn(async move { consumer2.subscribe_track(&Track::new("track2")).await }); + let track1c = consumer.assert_consume_track(&track1.info); // Explicitly aborting the broadcast should cascade to child tracks. drop(dynamic); @@ -509,10 +481,6 @@ mod test { // track1 should be closed because close() cascades. track1c.assert_error(); assert!(track1.is_closed()); - - // track2 should have been rejected. - let track2_result = track2_handle.await.unwrap(); - assert!(track2_result.is_err()); } #[tokio::test] @@ -523,66 +491,41 @@ mod test { let consumer = broadcast.consume(); let consumer2 = consumer.clone(); - // Subscribe to a dynamic track -- pends until handler responds. - let consumer_clone = consumer.clone(); - let track1_handle = - tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); - - // Let the subscribe_track call execute and queue the request. - tokio::task::yield_now().await; + // consume_track with dynamic handler should create a producer and queue it. + let mut track1_consumer = consumer.assert_consume_track(&Track::new("track1")); + track1_consumer.assert_no_group(); // Get the request -- there should be exactly one. - let request = dynamic.assert_request(); + let mut track1_producer = dynamic.assert_request(); dynamic.assert_no_request(); - assert_eq!(request.info.name, "track1"); - - // Handler creates the producer, inserts it, and responds. - let mut track_producer = request.info.clone().produce(); - dynamic.insert_track(&track_producer).unwrap(); - request.respond(track_producer.consume()); + assert_eq!(track1_producer.info.name, "track1"); - // The subscriber should now have a consumer. - let mut track1 = track1_handle.await.unwrap(); - track1.assert_no_group(); + // Dedup: consuming the same track again should return the existing one. 
+ let mut track1_dup = consumer2.assert_consume_track(&Track::new("track1")); + track1_dup.assert_is_clone(&track1_consumer); - // Dedup: subscribing to the same track should return the existing one. - let mut track1_dup = consumer2.assert_subscribe_track(&Track::new("track1")).await; - track1_dup.assert_is_clone(&track1); + // No new request should be queued. + dynamic.assert_no_request(); // Append a group and make sure both get it. - track_producer.append_group().unwrap(); - track1.assert_group(); + track1_producer.append_group().unwrap(); + track1_consumer.assert_group(); track1_dup.assert_group(); - - // Make sure that pending requests are rejected when dynamic is dropped. - let consumer3 = consumer.clone(); - let track2_handle = tokio::spawn(async move { consumer3.subscribe_track(&Track::new("track2")).await }); - tokio::task::yield_now().await; - drop(dynamic); - - let track2_result = track2_handle.await.unwrap(); - assert!(track2_result.is_err(), "should have errored"); } #[tokio::test] async fn stale_producer() { + tokio::time::pause(); + let broadcast = Broadcast::new().produce(); let mut dynamic = broadcast.dynamic(); let consumer = broadcast.consume(); - // Subscribe to a track. - let consumer_clone = consumer.clone(); - let track1_handle = - tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); - tokio::task::yield_now().await; + // Subscribe to a track (creates producer via dynamic). + let track1_consumer = consumer.assert_consume_track(&Track::new("track1")); // Handle the request. - let request = dynamic.assert_request(); - let mut producer1 = request.info.clone().produce(); - dynamic.insert_track(&producer1).unwrap(); - request.respond(producer1.consume()); - - let track1 = track1_handle.await.unwrap(); + let mut producer1 = dynamic.assert_request(); // Close the producer (simulating publisher disconnect). 
producer1.append_group().unwrap(); @@ -590,53 +533,22 @@ mod test { drop(producer1); // The consumer should see the track as closed. - track1.assert_closed(); + track1_consumer.assert_closed(); - // Subscribe again to the same track. - let consumer_clone = consumer.clone(); - let track2_handle = - tokio::spawn(async move { consumer_clone.subscribe_track(&Track::new("track1")).await.unwrap() }); - tokio::task::yield_now().await; + // Advance time to let cleanup task run. + tokio::time::advance(std::time::Duration::from_millis(1)).await; + let track1_consumer_clone = consumer.assert_consume_track(&Track::new("track1")); + drop(track1_consumer); + drop(track1_consumer_clone); + tokio::time::advance(std::time::Duration::from_millis(1)).await; - // Should be a new request. - let request2 = dynamic.assert_request(); - let mut producer2 = request2.info.clone().produce(); - dynamic.insert_track(&producer2).unwrap(); - request2.respond(producer2.consume()); + // Subscribe again to the same track -- should get a new request. + let mut track2_consumer = consumer.assert_consume_track(&Track::new("track1")); - let mut track2 = track2_handle.await.unwrap(); - track2.assert_not_closed(); - track2.assert_not_clone(&track1); + let mut producer2 = dynamic.assert_request(); producer2.append_group().unwrap(); - track2.assert_group(); - } - - #[tokio::test] - async fn dedup_pending() { - let broadcast = Broadcast::new().produce(); - let mut dynamic = broadcast.dynamic(); - let consumer = broadcast.consume(); - - // Two concurrent subscribers for the same track. - let c1 = consumer.clone(); - let c2 = consumer.clone(); - let h1 = tokio::spawn(async move { c1.subscribe_track(&Track::new("t")).await.unwrap() }); - let h2 = tokio::spawn(async move { c2.subscribe_track(&Track::new("t")).await.unwrap() }); - tokio::task::yield_now().await; - - // Only one request should be queued. - let request = dynamic.assert_request(); - dynamic.assert_no_request(); - - // Respond. 
- let producer = request.info.clone().produce(); - dynamic.insert_track(&producer).unwrap(); - request.respond(producer.consume()); - - let t1 = h1.await.unwrap(); - let t2 = h2.await.unwrap(); - t1.assert_is_clone(&t2); + track2_consumer.assert_group(); } #[tokio::test] @@ -647,17 +559,10 @@ mod test { // Subscribe to a track. let c1 = broadcast.consume(); - let c1_clone = c1.clone(); - let h1 = tokio::spawn(async move { c1_clone.subscribe_track(&Track::new("unknown_track")).await.unwrap() }); - tokio::task::yield_now().await; + let consumer1 = c1.assert_consume_track(&Track::new("unknown_track")); // Handle the request. - let request = dynamic.assert_request(); - let producer1 = request.info.clone().produce(); - dynamic.insert_track(&producer1).unwrap(); - request.respond(producer1.consume()); - - let consumer1 = h1.await.unwrap(); + let producer1 = dynamic.assert_request(); // The track producer should NOT be unused yet because there's a consumer. assert!( @@ -666,7 +571,7 @@ mod test { ); // Making a new consumer will keep the producer alive. - let consumer2 = c1.assert_subscribe_track(&Track::new("unknown_track")).await; + let consumer2 = c1.assert_consume_track(&Track::new("unknown_track")); consumer2.assert_is_clone(&consumer1); drop(consumer1); @@ -687,20 +592,25 @@ mod test { // Now we can subscribe again. 
let c2 = broadcast.consume(); - let c2_clone = c2.clone(); - let h2 = tokio::spawn(async move { c2_clone.subscribe_track(&Track::new("unknown_track")).await }); - tokio::task::yield_now().await; + let _consumer3 = c2.assert_consume_track(&Track::new("unknown_track")); - let request2 = dynamic.assert_request(); - let producer2 = request2.info.clone().produce(); - dynamic.insert_track(&producer2).unwrap(); - request2.respond(producer2.consume()); - let _ = h2.await.unwrap(); + let producer2 = dynamic.assert_request(); + assert!(!producer2.is_clone(&producer1)); + } - drop(c2); - assert!( - producer2.unused().now_or_never().is_some(), - "track producer should be unused after consumer is dropped" - ); + #[tokio::test] + async fn pending_requests_rejected_on_drop() { + let broadcast = Broadcast::new().produce(); + let dynamic = broadcast.dynamic(); + let consumer = broadcast.consume(); + + // consume_track creates a producer and queues it. + let track_consumer = consumer.assert_consume_track(&Track::new("track2")); + + // Drop dynamic -- pending producers should be aborted. + drop(dynamic); + + // Track consumer should see an error. + track_consumer.assert_error(); } } diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index a0ad437ef..b9b888c60 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -18,7 +18,6 @@ use super::{Group, GroupConsumer, GroupProducer}; use std::{ collections::{HashSet, VecDeque}, - sync::atomic, task::{Poll, ready}, time::Duration, }; @@ -70,8 +69,8 @@ struct State { final_sequence: Option, abort: Option, - /// Per-consumer subscription entries, keyed by auto-incrementing ID. - subscriptions: Vec<(u64, Subscription)>, + /// Per-subscriber subscription values, read by the producer for aggregation. 
+ subscriptions: Vec>, } impl State { @@ -167,16 +166,29 @@ impl State { return None; } - let priority = self.subscriptions.iter().map(|(_, s)| s.priority).max().unwrap(); + // Read each subscriber's current subscription value into owned copies. + let subs: Vec = self + .subscriptions + .iter() + .filter_map(|c| { + let r = c.read(); + if r.is_closed() { None } else { Some(r.clone()) } + }) + .collect(); + + if subs.is_empty() { + return None; + } + + let priority = subs.iter().map(|s| s.priority).max().unwrap(); // ordered is true only if ALL subscribers want ordered. - let ordered = self.subscriptions.iter().all(|(_, s)| s.ordered); + let ordered = subs.iter().all(|s| s.ordered); // max_latency: max across subscriptions. ZERO = unlimited wins. - let max_latency = self - .subscriptions + let max_latency = subs .iter() - .map(|(_, s)| s.max_latency) + .map(|s| s.max_latency) .reduce(|a, b| { if a.is_zero() || b.is_zero() { Duration::ZERO @@ -187,10 +199,9 @@ impl State { .unwrap(); // start: min across all (earliest requested group). None = latest. - let start = self - .subscriptions + let start = subs .iter() - .map(|(_, s)| s.start) + .map(|s| s.start) .reduce(|a, b| match (a, b) { (Some(a), Some(b)) => Some(a.min(b)), _ => None, @@ -198,10 +209,9 @@ impl State { .unwrap(); // end: max across all (latest / unlimited). None = no end (live). - let end = self - .subscriptions + let end = subs .iter() - .map(|(_, s)| s.end) + .map(|s| s.end) .reduce(|a, b| match (a, b) { (Some(a), Some(b)) => Some(a.max(b)), _ => None, @@ -226,6 +236,18 @@ impl State { Poll::Pending } } + + /// Check if the track is "ready" for a subscriber: has at least one group, + /// is finished (with 0+ groups), or is aborted. 
+ fn poll_ready(&self) -> Poll> { + if self.max_sequence.is_some() || self.final_sequence.is_some() { + Poll::Ready(Ok(())) + } else if let Some(err) = &self.abort { + Poll::Ready(Err(err.clone())) + } else { + Poll::Pending + } + } } /// A producer for a track, used to create new groups. @@ -248,8 +270,8 @@ impl TrackProducer { /// Create a new group with the given sequence number. /// /// If a group with the same sequence already exists but was aborted (e.g. due to a - /// cancelled subscription), it will be replaced. Successfully completed groups - /// return `Err(Error::Duplicate)`. + /// cancelled subscription), it will be replaced with a tombstone and the new group + /// pushed to the end. Successfully completed groups return `Err(Error::Duplicate)`. pub fn create_group(&mut self, info: Group) -> Result { let group = info.produce(); @@ -268,16 +290,18 @@ impl TrackProducer { if !existing.is_aborted() { return Err(Error::Duplicate); } - // Replace the aborted group. - let now = tokio::time::Instant::now(); - *slot = Some((group.clone(), now)); - state.evict_expired(now); - return Ok(group); + // Tombstone the old entry and push the new group at the end. + *slot = None; + break; } } } - // In duplicates set but not in the VecDeque -- shouldn't happen. - return Err(Error::Duplicate); + + let now = tokio::time::Instant::now(); + state.max_sequence = Some(state.max_sequence.unwrap_or(0).max(group.info.sequence)); + state.groups.push_back(Some((group.clone(), now))); + state.evict_expired(now); + return Ok(group); } let now = tokio::time::Instant::now(); @@ -419,6 +443,16 @@ impl TrackProducer { pub fn poll_subscription(&mut self, waiter: &conducer::Waiter) -> Poll> { let prev = self.prev_subscription.as_ref(); match self.state.poll(waiter, |state| { + // Remove closed subscription consumers. + state.subscriptions.retain(|c| !c.read().is_closed()); + + // Also poll each subscription consumer so the waiter is registered + // on inner changes too. 
+ for sub in &state.subscriptions { + // Register the waiter on each subscription's conducer channel. + let _ = sub.poll(waiter, |_| Poll::<()>::Pending); + } + let current = state.subscription(); if current.as_ref() != prev { Poll::Ready(current) @@ -512,42 +546,81 @@ impl TrackWeak { } } -/// Tracks a single consumer's subscription preferences within a track. +/// Iterates groups from a track while managing this subscriber's subscription lifecycle. /// -/// Created via [`TrackConsumer::subscribe`]. Registers this subscription in the +/// Created via [`TrackConsumer::subscribe`]. Registers a subscription in the /// shared state on creation; automatically removes it on drop. -/// Does NOT iterate groups -- purely for subscription lifecycle. -pub struct TrackSubscription { - id: u64, - info: Subscription, - state: conducer::Weak, +/// Blocks in `subscribe()` until the first group exists (or finish/abort). +pub struct TrackSubscriber { + pub info: Track, + state: conducer::Consumer, + sub: conducer::Producer, + index: usize, } -/// Global counter for subscription IDs. -static NEXT_SUB_ID: atomic::AtomicU64 = atomic::AtomicU64::new(0); +impl TrackSubscriber { + /// Receive the next group respecting the subscription start/end range. + pub async fn recv_group(&mut self) -> Result> { + conducer::wait(|waiter| self.poll_recv_group(waiter)).await + } + + fn poll_recv_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { + let sub = self.sub.read(); + let min_sequence = sub.start.unwrap_or(0); + let end = sub.end; + drop(sub); + + let Some((consumer, found_index)) = + ready!(self.poll(waiter, |state| { state.poll_next_group(self.index, min_sequence) })?) + else { + return Poll::Ready(Ok(None)); + }; + + // Check end bound. + if let Some(end) = end { + if consumer.info.sequence > end { + return Poll::Ready(Ok(None)); + } + } + + self.index = found_index + 1; + Poll::Ready(Ok(Some(consumer))) + } -impl TrackSubscription { /// Update this subscription's preferences. 
pub fn update(&mut self, sub: Subscription) { - if let Ok(mut state) = self.state.write() { - if let Some((_, existing)) = state.subscriptions.iter_mut().find(|(id, _)| *id == self.id) { - *existing = sub.clone(); - } + if let Ok(mut guard) = self.sub.write() { + *guard = sub; } - self.info = sub; + } + + /// Return the latest sequence number in the track. + pub fn latest(&self) -> Option { + self.state.read().max_sequence } /// The current subscription preferences. - pub fn info(&self) -> &Subscription { - &self.info + pub fn subscription(&self) -> Subscription { + self.sub.read().clone() + } + + // A helper to automatically apply Dropped if the state is closed without an error. + fn poll(&self, waiter: &conducer::Waiter, f: F) -> Poll> + where + F: Fn(&conducer::Ref<'_, State>) -> Poll>, + { + Poll::Ready(match ready!(self.state.poll(waiter, f)) { + Ok(res) => res, + Err(state) => Err(state.abort.clone().unwrap_or(Error::Dropped)), + }) } } -impl Drop for TrackSubscription { +impl Drop for TrackSubscriber { fn drop(&mut self) { - if let Ok(mut state) = self.state.write() { - state.subscriptions.retain(|(id, _)| *id != self.id); - } + // Close the subscription producer so the TrackProducer knows to remove it. + // The producer's poll_subscription will clean up closed consumers. + let _ = self.sub.close(); } } @@ -633,24 +706,30 @@ impl TrackConsumer { conducer::wait(|waiter| self.poll_finished(waiter)).await } - /// Register a subscription with the given preferences. + /// Register a subscription and block until the first group exists (or finish/abort). /// - /// The returned [`TrackSubscription`] tracks this consumer's preferences. + /// The returned [`TrackSubscriber`] iterates groups respecting start/end range. /// Dropping it removes the subscription from the aggregate. - /// Cloning a `TrackConsumer` does NOT clone any subscription. 
- pub fn subscribe(&self, sub: Subscription) -> Result { - let producer = self.state.produce().ok_or(Error::Dropped)?; - let mut state = producer.write().map_err(|_| Error::Dropped)?; - let id = NEXT_SUB_ID.fetch_add(1, atomic::Ordering::Relaxed); - state.subscriptions.push((id, sub.clone())); - drop(state); - let weak = producer.weak(); - drop(producer); - - Ok(TrackSubscription { - id, - info: sub, - state: weak, + pub async fn subscribe(&self, sub: Subscription) -> Result { + // Create a conducer channel for this subscription's preferences. + let sub_producer = conducer::Producer::new(sub); + let sub_consumer = sub_producer.consume(); + + { + // Upgrade to a temporary producer to insert the subscription consumer. + let producer = self.state.produce().ok_or(Error::Dropped)?; + let mut state = producer.write().map_err(|_| Error::Dropped)?; + state.subscriptions.push(sub_consumer); + } + + // Wait until the track is "ready": has at least one group, is finished, or aborted. + conducer::wait(|waiter| self.poll(waiter, |state| state.poll_ready())).await?; + + Ok(TrackSubscriber { + info: self.info.clone(), + state: self.state.clone(), + sub: sub_producer, + index: 0, }) } diff --git a/rs/moq-native/tests/backend.rs b/rs/moq-native/tests/backend.rs index 4324e59f6..a1dbb1476 100644 --- a/rs/moq-native/tests/backend.rs +++ b/rs/moq-native/tests/backend.rs @@ -69,10 +69,7 @@ async fn backend_test(scheme: &str, backend: moq_native::QuicBackend) { assert_eq!(path.as_str(), "test"); let bc = bc.expect("expected announce, got unannounce"); - let mut track_sub = bc - .subscribe_track(&Track::new("video")) - .await - .expect("subscribe_track failed"); + let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) .await @@ -222,10 +219,7 @@ async fn iroh_connect() { assert_eq!(path.as_str(), "test"); let bc = bc.expect("expected announce, got unannounce"); - let mut 
track_sub = bc - .subscribe_track(&Track::new("video")) - .await - .expect("subscribe_track failed"); + let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) .await diff --git a/rs/moq-native/tests/broadcast.rs b/rs/moq-native/tests/broadcast.rs index b11d60111..4f84e6d38 100644 --- a/rs/moq-native/tests/broadcast.rs +++ b/rs/moq-native/tests/broadcast.rs @@ -87,10 +87,7 @@ async fn broadcast_test(scheme: &str, client_version: Option<&str>, server_versi let bc = bc.expect("expected announce, got unannounce"); // Subscribe to the track. - let mut track_sub = bc - .subscribe_track(&Track::new("video")) - .await - .expect("subscribe_track failed"); + let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); // Read one group. let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) @@ -428,10 +425,7 @@ async fn broadcast_websocket() { let bc = bc.expect("expected announce, got unannounce"); // Subscribe to the track. - let mut track_sub = bc - .subscribe_track(&Track::new("video")) - .await - .expect("subscribe_track failed"); + let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); // Read one group. let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) @@ -536,10 +530,7 @@ async fn broadcast_websocket_fallback() { let bc = bc.expect("expected announce, got unannounce"); // Subscribe to the track. 
- let mut track_sub = bc - .subscribe_track(&Track::new("video")) - .await - .expect("subscribe_track failed"); + let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.next_group()) .await diff --git a/rs/moq-relay/src/web.rs b/rs/moq-relay/src/web.rs index 86509692e..3f39d74a0 100644 --- a/rs/moq-relay/src/web.rs +++ b/rs/moq-relay/src/web.rs @@ -281,7 +281,7 @@ async fn serve_fetch( // NOTE: The auth token is already scoped to the broadcast. let broadcast = origin.consume_broadcast("").ok_or(StatusCode::NOT_FOUND)?; - let mut track = broadcast.subscribe_track(&track).await.map_err(|err| match err { + let mut track = broadcast.consume_track(&track).map_err(|err| match err { moq_lite::Error::NotFound => StatusCode::NOT_FOUND, _ => StatusCode::INTERNAL_SERVER_ERROR, })?; From 8a5e7211c97a648842b3746416d0ad2422c7d082 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 12:43:26 -0700 Subject: [PATCH 05/14] Move sync setup before spawn, take TrackProducer by value - libmoq: move consume_track + consumer construction before tokio::spawn so errors surface synchronously - lite/ietf subscriber: take TrackProducer by value instead of &mut Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/libmoq/src/consume.rs | 37 +++++++++++++++--------------- rs/moq-lite/src/ietf/subscriber.rs | 8 +++---- rs/moq-lite/src/lite/subscriber.rs | 6 ++--- 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/rs/libmoq/src/consume.rs b/rs/libmoq/src/consume.rs index 62f4676e4..91a33e11c 100644 --- a/rs/libmoq/src/consume.rs +++ b/rs/libmoq/src/consume.rs @@ -48,6 +48,8 @@ impl Consume { pub fn catalog(&mut self, broadcast: Id, on_catalog: OnStatus) -> Result { let broadcast = self.broadcast.get(broadcast).ok_or(Error::BroadcastNotFound)?.clone(); + let track = broadcast.consume_track(&hang::catalog::Catalog::default_track())?; + let catalog = 
hang::CatalogConsumer::from(track); let channel = oneshot::channel(); let entry = TaskEntry { @@ -58,7 +60,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! { - res = Self::run_catalog(id, broadcast) => res, + res = Self::run_catalog(id, broadcast, catalog) => res, _ = channel.1 => Ok(()), }; @@ -71,9 +73,11 @@ impl Consume { Ok(id) } - async fn run_catalog(task_id: Id, broadcast: moq_lite::BroadcastConsumer) -> Result<(), Error> { - let track = broadcast.consume_track(&hang::catalog::Catalog::default_track())?; - let mut catalog = hang::CatalogConsumer::from(track); + async fn run_catalog( + task_id: Id, + broadcast: moq_lite::BroadcastConsumer, + mut catalog: hang::CatalogConsumer, + ) -> Result<(), Error> { while let Some(catalog) = catalog.next().await? { // Unfortunately we need to store the codec information on the heap. let audio_codec = catalog @@ -214,8 +218,10 @@ impl Consume { .nth(index) .ok_or(Error::NoIndex)?; - let broadcast = consume.broadcast.clone(); - let track_info = moq_lite::Track::new(rendition.clone()); + let track = consume + .broadcast + .consume_track(&moq_lite::Track::new(rendition.clone()))?; + let track = hang::container::OrderedConsumer::new(track, latency); let channel = oneshot::channel(); let entry = TaskEntry { @@ -226,7 +232,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! 
{ - res = Self::run_track(id, broadcast, &track_info, latency) => res, + res = Self::run_track(id, track) => res, _ = channel.1 => Ok(()), }; @@ -255,8 +261,10 @@ impl Consume { .nth(index) .ok_or(Error::NoIndex)?; - let broadcast = consume.broadcast.clone(); - let track_info = moq_lite::Track::new(rendition.clone()); + let track = consume + .broadcast + .consume_track(&moq_lite::Track::new(rendition.clone()))?; + let track = hang::container::OrderedConsumer::new(track, latency); let channel = oneshot::channel(); let entry = TaskEntry { @@ -267,7 +275,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! { - res = Self::run_track(id, broadcast, &track_info, latency) => res, + res = Self::run_track(id, track) => res, _ = channel.1 => Ok(()), }; @@ -280,14 +288,7 @@ impl Consume { Ok(id) } - async fn run_track( - task_id: Id, - broadcast: moq_lite::BroadcastConsumer, - track_info: &moq_lite::Track, - latency: std::time::Duration, - ) -> Result<(), Error> { - let track = broadcast.consume_track(track_info)?; - let mut track = hang::container::OrderedConsumer::new(track, latency); + async fn run_track(task_id: Id, mut track: hang::container::OrderedConsumer) -> Result<(), Error> { while let Some(mut ordered) = track.read().await? { // TODO add a chunking API so we don't have to (potentially) allocate a contiguous buffer for the frame. let mut new_payload = hang::container::BufList::new(); diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index aa21f1a66..df3b47b8f 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ b/rs/moq-lite/src/ietf/subscriber.rs @@ -496,7 +496,7 @@ impl Subscriber { async fn run_broadcast(&self, path: Path<'_>, mut broadcast: BroadcastDynamic) -> Result<(), Error> { loop { - let mut track = tokio::select! { + let track = tokio::select! 
{ track = broadcast.requested_track() => match track { Ok(track) => track, Err(err) => { @@ -511,14 +511,14 @@ impl Subscriber { let path = path.to_owned(); web_async::spawn(async move { - this.run_subscribe(path, &mut track).await; + this.run_subscribe(path, track).await; }); } Ok(()) } - async fn run_subscribe(&mut self, broadcast_path: Path<'_>, track: &mut TrackProducer) { + async fn run_subscribe(&mut self, broadcast_path: Path<'_>, mut track: TrackProducer) { let broadcast = broadcast_path; let request_id = match self.control.next_request_id().await { Ok(id) => id, @@ -552,7 +552,7 @@ impl Subscriber { } // Write Subscribe message - if let Err(err) = self.write_subscribe(&mut stream, request_id, &broadcast, track).await { + if let Err(err) = self.write_subscribe(&mut stream, request_id, &broadcast, &track).await { tracing::debug!(%err, "failed to write subscribe"); self.state.lock().subscribes.remove(&request_id); let _ = track.abort(err); diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index 40af523d6..cf76e80bd 100644 --- a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -156,7 +156,7 @@ impl Subscriber { loop { // Keep serving requests until there are no more consumers. // This way we'll clean up the task when the broadcast is no longer needed. - let mut track = tokio::select! { + let track = tokio::select! 
{ track = broadcast.requested_track() => match track { Ok(track) => track, Err(err) => { @@ -172,13 +172,13 @@ impl Subscriber { let path = path.clone(); web_async::spawn(async move { - this.run_subscribe(id, path, &mut track).await; + this.run_subscribe(id, path, track).await; this.subscribes.lock().remove(&id); }); } } - async fn run_subscribe(&mut self, id: u64, broadcast_path: Path<'_>, track: &mut TrackProducer) { + async fn run_subscribe(&mut self, id: u64, broadcast_path: Path<'_>, mut track: TrackProducer) { let track_name = track.info.name.clone(); self.subscribes.lock().insert(id, track.clone()); From 2918a34c58b5f7256e772f01dffb4a831255c7c7 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 12:45:30 -0700 Subject: [PATCH 06/14] Move insert_track into State, extract spawn_track_cleanup Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/moq-lite/src/model/broadcast.rs | 116 ++++++++++++----------------- 1 file changed, 49 insertions(+), 67 deletions(-) diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 087450da2..5f4f70fdc 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -50,6 +50,24 @@ struct State { abort: Option, } +impl State { + /// Insert a track into the lookup, returning an error if a live track with the same name exists. + fn insert_track(&mut self, track: &TrackProducer) -> Result<(), Error> { + match self.tracks.entry(track.info.name.clone()) { + hash_map::Entry::Occupied(mut entry) => { + if !entry.get().is_closed() { + return Err(Error::Duplicate); + } + entry.insert(track.weak()); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(track.weak()); + } + } + Ok(()) + } +} + fn modify(state: &conducer::Producer) -> Result, Error> { match state.write() { Ok(state) => Ok(state), @@ -57,6 +75,29 @@ fn modify(state: &conducer::Producer) -> Result, } } +/// Spawn a cleanup task that removes the track from the lookup when all consumers are dropped. 
+fn spawn_track_cleanup(state: &conducer::Producer, track: &TrackProducer) { + let weak = track.weak(); + let consumer_state = state.consume(); + web_async::spawn(async move { + let _ = weak.unused().await; + + let Some(producer) = consumer_state.produce() else { + return; + }; + let Ok(mut state) = producer.write() else { + return; + }; + + // Remove the entry, but reinsert if it was replaced by a different reference. + if let Some(current) = state.tracks.remove(&weak.info.name) + && !current.is_clone(&weak) + { + state.tracks.insert(current.info.name.clone(), current); + } + }); +} + /// Manages tracks within a broadcast. /// /// Insert tracks statically with [Self::insert_track] / [Self::create_track], @@ -159,43 +200,13 @@ impl BroadcastProducer { } } -/// Shared helper to insert a track into the broadcast lookup. +/// Insert a track into the broadcast lookup and spawn a cleanup task. fn insert_track_impl(state: &conducer::Producer, track: &TrackProducer) -> Result<(), Error> { let mut guard = modify(state)?; + guard.insert_track(track)?; + drop(guard); - match guard.tracks.entry(track.info.name.clone()) { - hash_map::Entry::Occupied(mut entry) => { - if !entry.get().is_closed() { - return Err(Error::Duplicate); - } - entry.insert(track.weak()); - } - hash_map::Entry::Vacant(entry) => { - entry.insert(track.weak()); - } - } - - // Spawn cleanup task to remove the track from the lookup when unused. - let weak = track.weak(); - let consumer_state = state.consume(); - web_async::spawn(async move { - let _ = weak.unused().await; - - let Some(producer) = consumer_state.produce() else { - return; - }; - let Ok(mut state) = producer.write() else { - return; - }; - - // Remove the entry, but reinsert if it was replaced by a different reference. 
- if let Some(current) = state.tracks.remove(&weak.info.name) - && !current.is_clone(&weak) - { - state.tracks.insert(current.info.name.clone(), current); - } - }); - + spawn_track_cleanup(state, track); Ok(()) } @@ -359,41 +370,12 @@ impl BroadcastConsumer { // Create a new TrackProducer, insert into lookup, and queue for dynamic handler. let track_producer = TrackProducer::new(track.clone()); - - // Insert into the lookup so subsequent consume_track calls find it. - match state.tracks.entry(track.name.clone()) { - hash_map::Entry::Occupied(mut entry) => { - entry.insert(track_producer.weak()); - } - hash_map::Entry::Vacant(entry) => { - entry.insert(track_producer.weak()); - } - } - - // Spawn cleanup task. - let weak = track_producer.weak(); - let consumer_state = producer.consume(); - web_async::spawn(async move { - let _ = weak.unused().await; - - let Some(p) = consumer_state.produce() else { - return; - }; - let Ok(mut s) = p.write() else { - return; - }; - - if let Some(current) = s.tracks.remove(&weak.info.name) - && !current.is_clone(&weak) - { - s.tracks.insert(current.info.name.clone(), current); - } - }); - + state.insert_track(&track_producer).expect("just removed stale entry"); let consumer = track_producer.consume(); + state.requested.push(track_producer.clone()); + drop(state); - // Queue the producer for the dynamic handler. 
- state.requested.push(track_producer); + spawn_track_cleanup(&producer, &track_producer); Ok(consumer) } From 81df59d7da8ef2259643ef2b161d479f53d23e99 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 12:47:28 -0700 Subject: [PATCH 07/14] Simplify consume_track doc comment Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/moq-lite/src/model/broadcast.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 5f4f70fdc..42a3fa466 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -344,11 +344,7 @@ impl Deref for BroadcastConsumer { } impl BroadcastConsumer { - /// Look up a track by name, returning its consumer if active. - /// - /// If the track doesn't exist and a dynamic handler is registered, - /// a [`TrackProducer`] is created, inserted, and queued for the handler. - /// The returned [`TrackConsumer`] is connected to that producer. + /// Returns the track if it exists, otherwise tries to route it to [`BroadcastDynamic`]. pub fn consume_track(&self, track: &Track) -> Result { let producer = self .state From e594069462fc3721f5383d1f0fe76f6a5d4a5535 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 13:06:41 -0700 Subject: [PATCH 08/14] Fix start/end aggregation: concrete values win over None None now means "no preference" rather than "latest wins all." Callers wanting "latest" should start with None, then update() with a concrete value once latest() is known. Also: replace expect() with ? in consume_track, remove dead comment. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/moq-lite/src/lite/publisher.rs | 2 -- rs/moq-lite/src/model/broadcast.rs | 2 +- rs/moq-lite/src/model/track.rs | 15 +++++++++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index ec043d5e3..651c879bd 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -293,8 +293,6 @@ impl Publisher { ) -> Result<(), Error> { let mut tasks = FuturesUnordered::new(); - // No-op: start_at was removed from TrackConsumer - loop { let group = tokio::select! { // Poll all active group futures; never matches but keeps them running. diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 42a3fa466..0a78d0606 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -366,7 +366,7 @@ impl BroadcastConsumer { // Create a new TrackProducer, insert into lookup, and queue for dynamic handler. let track_producer = TrackProducer::new(track.clone()); - state.insert_track(&track_producer).expect("just removed stale entry"); + state.insert_track(&track_producer)?; let consumer = track_producer.consume(); state.requested.push(track_producer.clone()); drop(state); diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index b9b888c60..f24cb4ab9 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -53,9 +53,10 @@ pub struct Subscription { pub ordered: bool, /// Maximum cache/latency. `Duration::ZERO` means unlimited. pub max_latency: Duration, - /// First group sequence to deliver. `None` means start at the latest. + /// First group sequence to deliver. `None` means no preference. + /// Use [`TrackSubscriber::update`] to set a concrete value once `latest()` is known. pub start: Option, - /// Last group sequence to deliver. `None` means no end (live). + /// Last group sequence to deliver. 
`None` means no preference (live). pub end: Option, } @@ -198,23 +199,25 @@ impl State { }) .unwrap(); - // start: min across all (earliest requested group). None = latest. + // start: min across all concrete values. None = no preference (defer to others). let start = subs .iter() .map(|s| s.start) .reduce(|a, b| match (a, b) { (Some(a), Some(b)) => Some(a.min(b)), - _ => None, + (Some(a), None) | (None, Some(a)) => Some(a), + (None, None) => None, }) .unwrap(); - // end: max across all (latest / unlimited). None = no end (live). + // end: max across all concrete values. None = no end (defer to others). let end = subs .iter() .map(|s| s.end) .reduce(|a, b| match (a, b) { (Some(a), Some(b)) => Some(a.max(b)), - _ => None, + (Some(a), None) | (None, Some(a)) => Some(a), + (None, None) => None, }) .unwrap(); From 5845d35ddc5cd5f7e18ce19581fe587d897f1d79 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 14:17:09 -0700 Subject: [PATCH 09/14] Move recv_group to TrackSubscriber, update all callers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove recv_group/poll_recv_group from TrackConsumer (deprecated stubs remain) - Remove index field from TrackConsumer - Add poll_recv_group, recv_group, closed, poll_closed to TrackSubscriber - Rename OrderedConsumer → OrderedSubscriber (takes TrackSubscriber) - CatalogConsumer now takes TrackSubscriber - Update all protocol publishers to subscribe() before iterating groups - Update moq-ffi: subscribe_catalog/subscribe_media are now async - Update libmoq: move subscribe into spawned tasks - Update moq-mux: CatalogProducer::consume() is now async - Fix hang tests: write first group before subscribe to avoid deadlock - Fix hang tests: await tokio::time::sleep in paused-time tests - Add unit tests for subscribe() blocking on first group, finish, abort Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/hang/examples/subscribe.rs | 6 +- rs/hang/src/catalog/consumer.rs | 20 
+-- rs/hang/src/container/consumer.rs | 207 +++++++++++++++++++---------- rs/hang/src/container/producer.rs | 9 +- rs/libmoq/src/consume.rs | 17 +-- rs/moq-clock/src/clock.rs | 4 +- rs/moq-clock/src/main.rs | 3 +- rs/moq-ffi/src/consumer.rs | 12 +- rs/moq-ffi/src/test.rs | 22 +-- rs/moq-lite/src/ietf/publisher.rs | 15 ++- rs/moq-lite/src/lite/publisher.rs | 7 +- rs/moq-lite/src/model/broadcast.rs | 27 ++-- rs/moq-lite/src/model/track.rs | 190 ++++++++++++++++++-------- rs/moq-mux/src/catalog.rs | 6 +- rs/moq-native/tests/backend.rs | 16 ++- rs/moq-native/tests/broadcast.rs | 24 +++- rs/moq-relay/src/web.rs | 25 ++-- 17 files changed, 395 insertions(+), 215 deletions(-) diff --git a/rs/hang/examples/subscribe.rs b/rs/hang/examples/subscribe.rs index e83020dea..9e7e0d25b 100644 --- a/rs/hang/examples/subscribe.rs +++ b/rs/hang/examples/subscribe.rs @@ -50,7 +50,8 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result // Read the catalog to discover available tracks. let catalog_track = broadcast.consume_track(&hang::Catalog::default_track())?; - let mut catalog = hang::CatalogConsumer::new(catalog_track); + let catalog_sub = catalog_track.subscribe(moq_lite::Subscription::default()).await?; + let mut catalog = hang::CatalogConsumer::new(catalog_sub); let info = catalog.next().await?.ok_or_else(|| anyhow::anyhow!("no catalog"))?; @@ -74,7 +75,8 @@ async fn run_subscribe(mut consumer: moq_lite::OriginConsumer) -> anyhow::Result let track = moq_lite::Track::new(name.clone()); let track_consumer = broadcast.consume_track(&track)?; - let mut ordered = hang::container::OrderedConsumer::new(track_consumer, Duration::from_millis(500)); + let track_sub = track_consumer.subscribe(moq_lite::Subscription::default()).await?; + let mut ordered = hang::container::OrderedSubscriber::new(track_sub, Duration::from_millis(500)); // Read frames in presentation order. while let Some(frame) = ordered.read().await? 
{ diff --git a/rs/hang/src/catalog/consumer.rs b/rs/hang/src/catalog/consumer.rs index 1339249a4..33b042716 100644 --- a/rs/hang/src/catalog/consumer.rs +++ b/rs/hang/src/catalog/consumer.rs @@ -2,18 +2,17 @@ use crate::{Catalog, Result}; /// A catalog consumer, used to receive catalog updates and discover tracks. /// -/// This wraps a `moq_lite::TrackConsumer` and automatically deserializes JSON +/// This wraps a `moq_lite::TrackSubscriber` and automatically deserializes JSON /// catalog data to discover available audio and video tracks in a broadcast. -#[derive(Clone)] pub struct CatalogConsumer { - /// Access to the underlying track consumer. - pub track: moq_lite::TrackConsumer, + /// Access to the underlying track subscriber. + pub track: moq_lite::TrackSubscriber, group: Option, } impl CatalogConsumer { - /// Create a new catalog consumer from a MoQ track consumer. - pub fn new(track: moq_lite::TrackConsumer) -> Self { + /// Create a new catalog consumer from a MoQ track subscriber. + pub fn new(track: moq_lite::TrackSubscriber) -> Self { Self { track, group: None } } @@ -42,15 +41,10 @@ impl CatalogConsumer { } } } - - /// Wait until the catalog track is closed. - pub async fn closed(&self) -> Result<()> { - Ok(self.track.closed().await?) - } } -impl From for CatalogConsumer { - fn from(inner: moq_lite::TrackConsumer) -> Self { +impl From for CatalogConsumer { + fn from(inner: moq_lite::TrackSubscriber) -> Self { Self::new(inner) } } diff --git a/rs/hang/src/container/consumer.rs b/rs/hang/src/container/consumer.rs index e7e18c1d4..08b8a7ae0 100644 --- a/rs/hang/src/container/consumer.rs +++ b/rs/hang/src/container/consumer.rs @@ -6,7 +6,7 @@ use buf_list::BufList; use super::{Frame, Timestamp}; use crate::Error; -/// A frame returned by [`OrderedConsumer::read()`] with group context. +/// A frame returned by [`OrderedSubscriber::read()`] with group context. #[derive(Clone, Debug)] pub struct OrderedFrame { /// The presentation timestamp for this frame. 
@@ -44,15 +44,15 @@ impl From for Frame { /// A consumer for hang-formatted media tracks with timestamp reordering. /// -/// This wraps a `moq_lite::TrackConsumer` and adds hang-specific functionality +/// This wraps a `moq_lite::TrackSubscriber` and adds hang-specific functionality /// like timestamp decoding, latency management, and frame buffering. /// /// ## Latency Management /// /// The consumer can skip groups that are too far behind to maintain low latency. /// Configure the maximum acceptable delay through the consumer's latency settings. -pub struct OrderedConsumer { - pub track: moq_lite::TrackConsumer, +pub struct OrderedSubscriber { + pub track: moq_lite::TrackSubscriber, // The current group that we want to read from current: u64, @@ -68,9 +68,9 @@ pub struct OrderedConsumer { max_latency: std::time::Duration, } -impl OrderedConsumer { - /// Create a new OrderedConsumer wrapping the given moq-lite consumer. - pub fn new(track: moq_lite::TrackConsumer, max_latency: std::time::Duration) -> Self { +impl OrderedSubscriber { + /// Create a new OrderedSubscriber wrapping the given moq-lite subscriber. + pub fn new(track: moq_lite::TrackSubscriber, max_latency: std::time::Duration) -> Self { Self { track, current: 0, @@ -227,14 +227,8 @@ impl OrderedConsumer { } } -impl From for moq_lite::TrackConsumer { - fn from(inner: OrderedConsumer) -> Self { - inner.track - } -} - -impl std::ops::Deref for OrderedConsumer { - type Target = moq_lite::TrackConsumer; +impl std::ops::Deref for OrderedSubscriber { + type Target = moq_lite::TrackSubscriber; fn deref(&self) -> &Self::Target { &self.track @@ -395,7 +389,7 @@ mod tests { } /// Drain all available frames with a per-read timeout. 
- async fn read_all(consumer: &mut OrderedConsumer) -> Result, crate::Error> { + async fn read_all(consumer: &mut OrderedSubscriber) -> Result, crate::Error> { let mut frames = Vec::new(); loop { match tokio::time::timeout(Duration::from_millis(200), consumer.read()).await { @@ -403,7 +397,7 @@ mod tests { Ok(Ok(None)) => break, Ok(Err(e)) => return Err(e), Err(_) => panic!( - "read_all: OrderedConsumer::read timed out after 200ms ({} frames collected so far)", + "read_all: OrderedSubscriber::read timed out after 200ms ({} frames collected so far)", frames.len() ), } @@ -417,10 +411,12 @@ mod tests { async fn read_single_group() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); @@ -435,10 +431,12 @@ mod tests { async fn read_multiple_frames_single_group() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0), ts(33_000), ts(66_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 3); @@ -455,13 +453,15 @@ mod tests { async fn read_multiple_groups_within_latency() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // 5 groups, 20ms spacing. 
Total span = 80ms, well within 500ms latency. for i in 0..5u64 { write_group(&mut track, i, &[ts(i * 20_000)]); } track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 5); @@ -478,7 +478,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(100)); // Group 0: 5 frames, NOT finished (blocks consumer) let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -498,6 +497,11 @@ mod tests { } track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(100), + ); + // Finish group 0 after consumer has had time to accumulate pending groups let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; @@ -515,7 +519,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::ZERO); // Group 0: 1 frame at a HIGH timestamp, NOT finished. 
// This makes the cutoff high (max_timestamp + 0 = 400ms), so @@ -537,6 +540,11 @@ mod tests { } track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::ZERO, + ); + let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; group0.finish().unwrap(); @@ -556,7 +564,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(100)); // Group 0: 1 frame, NOT finished let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -573,6 +580,11 @@ mod tests { } track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(100), + ); + let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; group0.finish().unwrap(); @@ -602,7 +614,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); // Group 0: 1 frame, NOT finished (blocks consumer, lets groups 2 and 1 accumulate in pending) let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -618,6 +629,11 @@ mod tests { write_group(&mut track, 1, &[ts(30_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); + // Finish group 0 so the consumer can proceed to pending groups let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(10)).await; @@ -638,11 +654,13 @@ mod tests { async fn adjacent_group_flushed_immediately() { let mut track = moq_lite::Track::new("test").produce(); 
let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0)]); write_group(&mut track, 1, &[ts(30_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 2); @@ -656,11 +674,13 @@ mod tests { async fn bframes_within_group() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // B-frame decode order: timestamps [0, 66ms, 33ms] write_group(&mut track, 0, &[ts(0), ts(66_000), ts(33_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 3); @@ -677,9 +697,11 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let result = tokio::time::timeout(Duration::from_millis(200), consumer.read()).await; match result { @@ -695,9 +717,11 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0)]); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); 
track.abort(moq_lite::Error::Cancel).unwrap(); // Consumer should not hang; it should return frames or error gracefully @@ -718,17 +742,13 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - - // closed() should not resolve yet - assert!( - tokio::time::timeout(Duration::from_millis(50), consumer.closed()) - .await - .is_err() - ); - - // finish() + drop triggers the Closed/Dropped state that closed() waits for + // finish() triggers the ready state so subscribe() can proceed track.finish().unwrap(); + let consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); + // drop triggers the Closed/Dropped state that closed() waits for drop(track); // closed() should resolve now @@ -744,8 +764,6 @@ mod tests { async fn gap_in_group_sequence_recovery() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(100)); - // Groups 0, 1 then skip 2, write 3-6 write_group(&mut track, 0, &[ts(0), ts(20_000)]); write_group(&mut track, 1, &[ts(40_000), ts(60_000)]); @@ -755,6 +773,10 @@ mod tests { write_group(&mut track, 5, &[ts(200_000), ts(220_000)]); write_group(&mut track, 6, &[ts(240_000), ts(260_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(100), + ); // Consumer must not deadlock on the missing group 2 let frames = read_all(&mut consumer).await.unwrap(); @@ -765,14 +787,16 @@ mod tests { async fn gap_at_start_of_sequence() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(80)); - 
// First group at sequence 5 (simulating joining mid-stream), gap at 6 write_group(&mut track, 5, &[ts(0), ts(20_000)]); write_group(&mut track, 7, &[ts(80_000), ts(100_000)]); write_group(&mut track, 8, &[ts(120_000), ts(140_000)]); write_group(&mut track, 9, &[ts(160_000), ts(180_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(80), + ); let frames = read_all(&mut consumer).await.unwrap(); assert!(frames.len() >= 4, "Expected >= 4 frames, got {}", frames.len()); @@ -784,10 +808,12 @@ mod tests { async fn frame_timestamp_and_index_decoding() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0), ts(33_333), ts(66_666)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 3); @@ -806,7 +832,6 @@ mod tests { async fn frame_payload_preserved() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); let payload_bytes = vec![0x01, 0x02, 0x03, 0x04, 0x05]; let mut group = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -818,6 +843,10 @@ mod tests { .unwrap(); group.finish().unwrap(); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); @@ -844,7 +873,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = 
track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_secs(10)); // Group 0: 1 frame, NOT finished let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -858,6 +886,11 @@ mod tests { // Group 1: finished (buffer_until will buffer its frames) write_group(&mut track, 1, &[ts(100_000)]); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_secs(10), + ); + let finisher = tokio::spawn(async move { // After consumer has buffered group 1's frames via buffer_until... tokio::time::sleep(Duration::from_millis(20)).await; @@ -890,12 +923,14 @@ mod tests { async fn large_timestamps() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_secs(3700)); - // 1 hour = 3,600,000,000 microseconds let one_hour = 3_600_000_000u64; write_group(&mut track, 0, &[ts(one_hour)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_secs(3700), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); @@ -907,10 +942,12 @@ mod tests { async fn set_max_latency_changes_behavior() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_secs(10)); - write_group(&mut track, 0, &[ts(0)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_secs(10), + ); // Read with initial large latency let frame = consumer.read().await.unwrap().unwrap(); @@ -937,7 +974,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = 
OrderedConsumer::new(consumer_track, Duration::from_millis(40)); // Group 0: B-frame decode order [0, 66ms, 33ms], NOT finished (blocks consumer) let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -954,6 +990,11 @@ mod tests { write_group(&mut track, 1, &[ts(100_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(40), + ); + // Finish group 0 after consumer has had time to accumulate pending groups let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; @@ -985,8 +1026,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - // max_latency = 100ms. - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(100)); // Groups 3, 5, 7 — non-sequential with gaps. // After startup selects group 3 (earliest with data), consumer reads it, @@ -1003,6 +1042,12 @@ mod tests { .encode(&mut group7) .unwrap(); + // max_latency = 100ms. 
+ let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(100), + ); + let finisher = tokio::spawn(async move { // Wait for the consumer to process groups 3 and 5, then push // a second frame on group 7 with a high enough timestamp to @@ -1042,13 +1087,15 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // Group 5: no frames written yet (pending) let _group5 = track.create_group(moq_lite::Group { sequence: 5 }).unwrap(); // Group 7: has data write_group(&mut track, 7, &[ts(210_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = tokio::time::timeout(Duration::from_millis(500), async { let mut frames = Vec::new(); @@ -1069,11 +1116,13 @@ mod tests { async fn startup_single_group_mid_stream() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // Only group 100 exists. 
write_group(&mut track, 100, &[ts(3_000_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); @@ -1085,7 +1134,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(50)); // Group 0: blocks let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -1102,6 +1150,11 @@ mod tests { write_group(&mut track, 3, &[ts(300_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(50), + ); + let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(20)).await; group0.finish().unwrap(); @@ -1117,7 +1170,6 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(100)); // Group 0: blocks let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); @@ -1132,6 +1184,11 @@ mod tests { write_group(&mut track, 1, &[ts(100_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(100), + ); + let finisher = tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(20)).await; group0.finish().unwrap(); @@ -1146,8 +1203,6 @@ mod tests { async fn group_error_skips_to_next() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // Group 0: aborted let mut group0 = 
track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); group0.abort(moq_lite::Error::Cancel).unwrap(); @@ -1155,6 +1210,10 @@ mod tests { // Group 1: valid write_group(&mut track, 1, &[ts(30_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); @@ -1166,9 +1225,11 @@ mod tests { tokio::time::pause(); let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - write_group(&mut track, 0, &[ts(0)]); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); // Finish the track after a delay, simulating incremental arrival. let finisher = tokio::spawn(async move { @@ -1196,8 +1257,6 @@ mod tests { async fn empty_group_advances() { let mut track = moq_lite::Track::new("test").produce(); let consumer_track = track.consume(); - let mut consumer = OrderedConsumer::new(consumer_track, Duration::from_millis(500)); - // Group 0: empty (no frames, just finished) let mut group0 = track.create_group(moq_lite::Group { sequence: 0 }).unwrap(); group0.finish().unwrap(); @@ -1205,6 +1264,10 @@ mod tests { // Group 1: has data write_group(&mut track, 1, &[ts(30_000)]); track.finish().unwrap(); + let mut consumer = OrderedSubscriber::new( + consumer_track.subscribe(Default::default()).await.unwrap(), + Duration::from_millis(500), + ); let frames = read_all(&mut consumer).await.unwrap(); assert_eq!(frames.len(), 1); diff --git a/rs/hang/src/container/producer.rs b/rs/hang/src/container/producer.rs index f179e364d..3ab9a8ff7 100644 --- a/rs/hang/src/container/producer.rs +++ b/rs/hang/src/container/producer.rs @@ -1,4 +1,4 @@ -use super::{Frame, OrderedConsumer, Timestamp}; 
+use super::{Frame, Timestamp}; use crate::Error; /// A producer for media tracks with group management. @@ -130,11 +130,8 @@ impl OrderedProducer { } /// Create a consumer for this track. - /// - /// Multiple consumers can be created from the same producer, each receiving - /// a copy of all data written to the track. - pub fn consume(&self, max_latency: std::time::Duration) -> OrderedConsumer { - OrderedConsumer::new(self.track.consume(), max_latency) + pub fn consume(&self) -> moq_lite::TrackConsumer { + self.track.consume() } } diff --git a/rs/libmoq/src/consume.rs b/rs/libmoq/src/consume.rs index 91a33e11c..761850431 100644 --- a/rs/libmoq/src/consume.rs +++ b/rs/libmoq/src/consume.rs @@ -49,7 +49,6 @@ impl Consume { pub fn catalog(&mut self, broadcast: Id, on_catalog: OnStatus) -> Result { let broadcast = self.broadcast.get(broadcast).ok_or(Error::BroadcastNotFound)?.clone(); let track = broadcast.consume_track(&hang::catalog::Catalog::default_track())?; - let catalog = hang::CatalogConsumer::from(track); let channel = oneshot::channel(); let entry = TaskEntry { @@ -60,7 +59,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! { - res = Self::run_catalog(id, broadcast, catalog) => res, + res = Self::run_catalog(id, broadcast, track) => res, _ = channel.1 => Ok(()), }; @@ -76,8 +75,10 @@ impl Consume { async fn run_catalog( task_id: Id, broadcast: moq_lite::BroadcastConsumer, - mut catalog: hang::CatalogConsumer, + track: moq_lite::TrackConsumer, ) -> Result<(), Error> { + let subscriber = track.subscribe(moq_lite::Subscription::default()).await?; + let mut catalog = hang::CatalogConsumer::new(subscriber); while let Some(catalog) = catalog.next().await? { // Unfortunately we need to store the codec information on the heap. 
let audio_codec = catalog @@ -221,7 +222,6 @@ impl Consume { let track = consume .broadcast .consume_track(&moq_lite::Track::new(rendition.clone()))?; - let track = hang::container::OrderedConsumer::new(track, latency); let channel = oneshot::channel(); let entry = TaskEntry { @@ -232,7 +232,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! { - res = Self::run_track(id, track) => res, + res = Self::run_track(id, track, latency) => res, _ = channel.1 => Ok(()), }; @@ -264,7 +264,6 @@ impl Consume { let track = consume .broadcast .consume_track(&moq_lite::Track::new(rendition.clone()))?; - let track = hang::container::OrderedConsumer::new(track, latency); let channel = oneshot::channel(); let entry = TaskEntry { @@ -275,7 +274,7 @@ impl Consume { tokio::spawn(async move { let res = tokio::select! { - res = Self::run_track(id, track) => res, + res = Self::run_track(id, track, latency) => res, _ = channel.1 => Ok(()), }; @@ -288,7 +287,9 @@ impl Consume { Ok(id) } - async fn run_track(task_id: Id, mut track: hang::container::OrderedConsumer) -> Result<(), Error> { + async fn run_track(task_id: Id, track: moq_lite::TrackConsumer, latency: std::time::Duration) -> Result<(), Error> { + let subscriber = track.subscribe(moq_lite::Subscription::default()).await?; + let mut track = hang::container::OrderedSubscriber::new(subscriber, latency); while let Some(mut ordered) = track.read().await? { // TODO add a chunking API so we don't have to (potentially) allocate a contiguous buffer for the frame. 
let mut new_payload = hang::container::BufList::new(); diff --git a/rs/moq-clock/src/clock.rs b/rs/moq-clock/src/clock.rs index 527f218a6..67e16f3e3 100644 --- a/rs/moq-clock/src/clock.rs +++ b/rs/moq-clock/src/clock.rs @@ -71,11 +71,11 @@ impl Publisher { } } pub struct Subscriber { - track: TrackConsumer, + track: TrackSubscriber, } impl Subscriber { - pub fn new(track: TrackConsumer) -> Self { + pub fn new(track: TrackSubscriber) -> Self { Self { track } } diff --git a/rs/moq-clock/src/main.rs b/rs/moq-clock/src/main.rs index ecb1ed28b..44408f05f 100644 --- a/rs/moq-clock/src/main.rs +++ b/rs/moq-clock/src/main.rs @@ -95,7 +95,8 @@ async fn main() -> anyhow::Result<()> { (path, Some(broadcast)) => { tracing::info!(broadcast = %path, "broadcast is online, subscribing to track"); let track = broadcast.consume_track(&track)?; - clock = Some(clock::Subscriber::new(track)); + let subscriber = track.subscribe(Subscription::default()).await?; + clock = Some(clock::Subscriber::new(subscriber)); } (path, None) => { tracing::warn!(broadcast = %path, "broadcast is offline, waiting..."); diff --git a/rs/moq-ffi/src/consumer.rs b/rs/moq-ffi/src/consumer.rs index 6d1f1c6bc..4ed8edae0 100644 --- a/rs/moq-ffi/src/consumer.rs +++ b/rs/moq-ffi/src/consumer.rs @@ -42,7 +42,7 @@ pub struct MoqMediaConsumer { } struct Media { - inner: hang::container::OrderedConsumer, + inner: hang::container::OrderedSubscriber, } impl Media { @@ -74,9 +74,10 @@ impl Media { #[uniffi::export] impl MoqBroadcastConsumer { /// Subscribe to the catalog for this broadcast. 
- pub fn subscribe_catalog(&self) -> Result, MoqError> { + pub async fn subscribe_catalog(&self) -> Result, MoqError> { let track = self.inner.consume_track(&hang::catalog::Catalog::default_track())?; - let consumer = hang::CatalogConsumer::from(track); + let subscriber = track.subscribe(moq_lite::Subscription::default()).await?; + let consumer = hang::CatalogConsumer::new(subscriber); Ok(Arc::new(MoqCatalogConsumer { task: Task::new(Catalog { inner: consumer }), })) @@ -85,10 +86,11 @@ impl MoqBroadcastConsumer { /// Subscribe to a media track by name, delivering frames in decode order. /// /// `max_latency_ms` controls the maximum buffering before skipping a GoP. - pub fn subscribe_media(&self, name: String, max_latency_ms: u64) -> Result, MoqError> { + pub async fn subscribe_media(&self, name: String, max_latency_ms: u64) -> Result, MoqError> { let track = self.inner.consume_track(&moq_lite::Track::new(name))?; + let subscriber = track.subscribe(moq_lite::Subscription::default()).await?; let latency = std::time::Duration::from_millis(max_latency_ms); - let consumer = hang::container::OrderedConsumer::new(track, latency); + let consumer = hang::container::OrderedSubscriber::new(subscriber, latency); Ok(Arc::new(MoqMediaConsumer { task: Task::new(Media { inner: consumer }), })) diff --git a/rs/moq-ffi/src/test.rs b/rs/moq-ffi/src/test.rs index 10150767a..030b06eff 100644 --- a/rs/moq-ffi/src/test.rs +++ b/rs/moq-ffi/src/test.rs @@ -83,7 +83,7 @@ async fn local_publish_consume_audio() { assert_eq!(announcement.path(), "live"); let broadcast_consumer = announcement.broadcast(); - let catalog_consumer = broadcast_consumer.subscribe_catalog().unwrap(); + let catalog_consumer = broadcast_consumer.subscribe_catalog().await.unwrap(); let catalog = tokio::time::timeout(TIMEOUT, catalog_consumer.next()) .await @@ -98,7 +98,10 @@ async fn local_publish_consume_audio() { assert_eq!(audio.channel_count, 2); assert!(catalog.video.is_empty()); - let media_consumer = 
broadcast_consumer.subscribe_media(track_name.clone(), 10_000).unwrap(); + let media_consumer = broadcast_consumer + .subscribe_media(track_name.clone(), 10_000) + .await + .unwrap(); let payload = b"opus audio payload data".to_vec(); media.write_frame(payload.clone(), 1_000_000).unwrap(); @@ -131,7 +134,7 @@ async fn video_publish_consume() { .expect("expected announcement"); let broadcast_consumer = announcement.broadcast(); - let catalog_consumer = broadcast_consumer.subscribe_catalog().unwrap(); + let catalog_consumer = broadcast_consumer.subscribe_catalog().await.unwrap(); let catalog = tokio::time::timeout(TIMEOUT, catalog_consumer.next()) .await @@ -151,7 +154,10 @@ async fn video_publish_consume() { assert_eq!(coded.height, 720); assert!(catalog.audio.is_empty()); - let media_consumer = broadcast_consumer.subscribe_media(track_name.clone(), 10_000).unwrap(); + let media_consumer = broadcast_consumer + .subscribe_media(track_name.clone(), 10_000) + .await + .unwrap(); let keyframe = vec![0x00, 0x00, 0x00, 0x01, 0x65, 0xAA, 0xBB, 0xCC]; media.write_frame(keyframe, 0).unwrap(); @@ -183,7 +189,7 @@ async fn multiple_frames_ordering() { .unwrap(); let broadcast_consumer = announcement.broadcast(); - let catalog_consumer = broadcast_consumer.subscribe_catalog().unwrap(); + let catalog_consumer = broadcast_consumer.subscribe_catalog().await.unwrap(); let catalog = tokio::time::timeout(TIMEOUT, catalog_consumer.next()) .await .unwrap() @@ -191,7 +197,7 @@ async fn multiple_frames_ordering() { .unwrap(); let track_name = catalog.audio.keys().next().unwrap().clone(); - let media_consumer = broadcast_consumer.subscribe_media(track_name, 10_000).unwrap(); + let media_consumer = broadcast_consumer.subscribe_media(track_name, 10_000).await.unwrap(); let timestamps: [u64; 5] = [0, 20_000, 40_000, 60_000, 80_000]; for (i, &ts) in timestamps.iter().enumerate() { @@ -229,7 +235,7 @@ async fn catalog_update_on_new_track() { .unwrap(); let broadcast_consumer = 
announcement.broadcast(); - let catalog_consumer = broadcast_consumer.subscribe_catalog().unwrap(); + let catalog_consumer = broadcast_consumer.subscribe_catalog().await.unwrap(); let catalog1 = tokio::time::timeout(TIMEOUT, catalog_consumer.next()) .await @@ -278,7 +284,7 @@ async fn announced_broadcast() { .expect("expected announcement"); assert_eq!(announcement.path(), "test/broadcast"); - let _catalog = announcement.broadcast().subscribe_catalog().unwrap(); + let _catalog = announcement.broadcast().subscribe_catalog().await.unwrap(); } #[test] diff --git a/rs/moq-lite/src/ietf/publisher.rs b/rs/moq-lite/src/ietf/publisher.rs index a90f25f6c..e8f9ebc04 100644 --- a/rs/moq-lite/src/ietf/publisher.rs +++ b/rs/moq-lite/src/ietf/publisher.rs @@ -5,7 +5,7 @@ use web_async::FuturesExt; use web_transport_trait::SendStream; use crate::{ - AsPath, Error, Origin, OriginConsumer, Track, TrackConsumer, + AsPath, Error, Origin, OriginConsumer, Subscription, Track, TrackSubscriber, coding::{Stream, Writer}, ietf::{self, Control, FetchHeader, FetchType, FilterType, GroupOrder, Location, RequestId}, model::GroupConsumer, @@ -122,6 +122,15 @@ impl Publisher { } }; + let subscriber = match track.subscribe(Subscription::default()).await { + Ok(sub) => sub, + Err(err) => { + self.write_subscribe_error(&mut stream.writer, request_id, 404, &err.to_string()) + .await?; + return Ok(()); + } + }; + // Send SubscribeOk on the stream stream.writer.encode(&ietf::SubscribeOk::ID).await?; stream @@ -137,7 +146,7 @@ impl Publisher { // Run the track, cancelling on reader close (Unsubscribe or stream close) let res = tokio::select! { - res = self.run_track(track, request_id) => res, + res = self.run_track(subscriber, request_id) => res, _ = stream.reader.closed() => Ok(()), _ = self.session.closed() => Ok(()), }; @@ -212,7 +221,7 @@ impl Publisher { } /// Serve a track using FuturesUnordered for unlimited concurrent groups. 
- async fn run_track(&self, mut track: TrackConsumer, request_id: RequestId) -> Result<(), Error> { + async fn run_track(&self, mut track: TrackSubscriber, request_id: RequestId) -> Result<(), Error> { let mut tasks = FuturesUnordered::new(); loop { diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index cda36d65c..92d0a3881 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -5,7 +5,7 @@ use web_async::FuturesExt; use web_transport_trait::Stats; use crate::{ - AsPath, BroadcastConsumer, Error, Origin, OriginConsumer, Track, TrackConsumer, + AsPath, BroadcastConsumer, Error, Origin, OriginConsumer, Subscription, Track, TrackSubscriber, coding::{Stream, Writer}, lite::{ self, @@ -264,6 +264,7 @@ impl Publisher { let broadcast = consumer.ok_or(Error::NotFound)?; let track = broadcast.consume_track(&track)?; + let subscriber = track.subscribe(Subscription::default()).await?; let info = lite::SubscribeOk { priority: 0, @@ -276,7 +277,7 @@ impl Publisher { stream.writer.encode(&lite::SubscribeResponse::Ok(info)).await?; tokio::select! 
{ - res = Self::run_track(session, track, subscribe, priority, version) => res?, + res = Self::run_track(session, subscriber, subscribe, priority, version) => res?, res = stream.reader.closed() => res?, } @@ -286,7 +287,7 @@ impl Publisher { async fn run_track( session: S, - mut track: TrackConsumer, + mut track: TrackSubscriber, subscribe: &lite::Subscribe<'_>, priority: PriorityQueue, version: Version, diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 0a78d0606..50cdc8a9c 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -425,19 +425,18 @@ mod test { let consumer = producer.consume(); - let mut track1_consumer = consumer.assert_consume_track(&Track::new("track1")); - track1_consumer.assert_group(); + let track1_consumer = consumer.assert_consume_track(&Track::new("track1")); + assert_eq!(track1_consumer.latest(), Some(0)); let mut track2 = Track::new("track2").produce(); producer.assert_insert_track(&track2); let consumer2 = producer.consume(); - let mut track2_consumer = consumer2.assert_consume_track(&Track::new("track2")); - track2_consumer.assert_no_group(); + let track2_consumer = consumer2.assert_consume_track(&Track::new("track2")); + assert_eq!(track2_consumer.latest(), None); track2.append_group().unwrap(); - - track2_consumer.assert_group(); + assert_eq!(track2_consumer.latest(), Some(0)); } #[tokio::test] @@ -470,8 +469,8 @@ mod test { let consumer2 = consumer.clone(); // consume_track with dynamic handler should create a producer and queue it. - let mut track1_consumer = consumer.assert_consume_track(&Track::new("track1")); - track1_consumer.assert_no_group(); + let track1_consumer = consumer.assert_consume_track(&Track::new("track1")); + assert_eq!(track1_consumer.latest(), None); // Get the request -- there should be exactly one. 
let mut track1_producer = dynamic.assert_request(); @@ -479,16 +478,16 @@ mod test { assert_eq!(track1_producer.info.name, "track1"); // Dedup: consuming the same track again should return the existing one. - let mut track1_dup = consumer2.assert_consume_track(&Track::new("track1")); + let track1_dup = consumer2.assert_consume_track(&Track::new("track1")); track1_dup.assert_is_clone(&track1_consumer); // No new request should be queued. dynamic.assert_no_request(); - // Append a group and make sure both get it. + // Append a group and make sure both see it. track1_producer.append_group().unwrap(); - track1_consumer.assert_group(); - track1_dup.assert_group(); + assert_eq!(track1_consumer.latest(), Some(0)); + assert_eq!(track1_dup.latest(), Some(0)); } #[tokio::test] @@ -521,12 +520,12 @@ mod test { tokio::time::advance(std::time::Duration::from_millis(1)).await; // Subscribe again to the same track -- should get a new request. - let mut track2_consumer = consumer.assert_consume_track(&Track::new("track1")); + let track2_consumer = consumer.assert_consume_track(&Track::new("track1")); let mut producer2 = dynamic.assert_request(); producer2.append_group().unwrap(); - track2_consumer.assert_group(); + assert_eq!(track2_consumer.latest(), Some(0)); } #[tokio::test] diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index 41dc7c319..a7119b72d 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -409,7 +409,6 @@ impl TrackProducer { TrackConsumer { info: self.info.clone(), state: self.state.consume(), - index: 0, } } @@ -533,7 +532,6 @@ impl TrackWeak { TrackConsumer { info: self.info.clone(), state: self.state.consume(), - index: 0, } } @@ -567,7 +565,7 @@ impl TrackSubscriber { conducer::wait(|waiter| self.poll_recv_group(waiter)).await } - fn poll_recv_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { + pub fn poll_recv_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { let sub = self.sub.read(); let 
min_sequence = sub.start.unwrap_or(0); let end = sub.end; @@ -607,6 +605,16 @@ impl TrackSubscriber { self.sub.read().clone() } + /// Poll for track closure, without blocking. + pub fn poll_closed(&self, waiter: &conducer::Waiter) -> Poll> { + self.poll(waiter, |state| state.poll_closed()) + } + + /// Block until the track is closed. + pub async fn closed(&self) -> Result<()> { + conducer::wait(|waiter| self.poll_closed(waiter)).await + } + // A helper to automatically apply Dropped if the state is closed without an error. fn poll(&self, waiter: &conducer::Waiter, f: F) -> Poll> where @@ -632,7 +640,6 @@ impl Drop for TrackSubscriber { pub struct TrackConsumer { pub info: Track, state: conducer::Consumer, - index: usize, } impl TrackConsumer { @@ -648,46 +655,16 @@ impl TrackConsumer { }) } - /// Poll for the next group received over the network, without blocking. - /// - /// Groups may arrive out of order or with gaps due to network conditions. - /// Use `OrderedConsumer` if you need groups in sequence order, - /// skipping those that arrive too late. - /// - /// Returns `Poll::Ready(Ok(Some(group)))` when a group is available, - /// `Poll::Ready(Ok(None))` when the track is finished, - /// `Poll::Ready(Err(e))` when the track has been aborted, or - /// `Poll::Pending` when no group is available yet. - pub fn poll_recv_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { - let Some((consumer, found_index)) = - ready!(self.poll(waiter, |state| state.poll_next_group(self.index, 0))?) - else { - return Poll::Ready(Ok(None)); - }; - - self.index = found_index + 1; - Poll::Ready(Ok(Some(consumer))) - } - - /// Receive the next group available on this track. - /// - /// Groups may arrive out of order or with gaps due to network conditions. - /// Use `OrderedConsumer` if you need groups in sequence order, - /// skipping those that arrive too late. 
- pub async fn recv_group(&mut self) -> Result> { - conducer::wait(|waiter| self.poll_recv_group(waiter)).await - } - - /// Deprecated: Use [`recv_group`](Self::recv_group) instead. - #[deprecated(note = "Use recv_group instead")] + /// Removed: Use [`TrackSubscriber::recv_group`] instead. + #[deprecated(note = "Use TrackSubscriber::recv_group instead")] pub async fn next_group(&mut self) -> Result> { - self.recv_group().await + unimplemented!("Use TrackSubscriber::recv_group instead") } - /// Deprecated: Use [`poll_recv_group`](Self::poll_recv_group) instead. - #[deprecated(note = "Use poll_recv_group instead")] - pub fn poll_next_group(&mut self, waiter: &conducer::Waiter) -> Poll>> { - self.poll_recv_group(waiter) + /// Removed: Use [`TrackSubscriber::poll_recv_group`] instead. + #[deprecated(note = "Use TrackSubscriber::poll_recv_group instead")] + pub fn poll_next_group(&mut self, _waiter: &conducer::Waiter) -> Poll>> { + unimplemented!("Use TrackSubscriber::poll_recv_group instead") } /// Poll for the group with the given sequence, without blocking. 
@@ -765,9 +742,10 @@ impl TrackConsumer { use futures::FutureExt; #[cfg(test)] +#[allow(deprecated)] impl TrackConsumer { pub fn assert_group(&mut self) -> GroupConsumer { - self.recv_group() + self.next_group() .now_or_never() .expect("group would have blocked") .expect("would have errored") @@ -776,8 +754,8 @@ impl TrackConsumer { pub fn assert_no_group(&mut self) { assert!( - self.recv_group().now_or_never().is_none(), - "recv_group would not have blocked" + self.next_group().now_or_never().is_none(), + "next_group would not have blocked" ); } @@ -804,6 +782,12 @@ impl TrackConsumer { pub fn assert_not_clone(&self, other: &Self) { assert!(!self.is_clone(other), "should not be clone"); } + + pub async fn assert_subscribe(&self) -> TrackSubscriber { + self.subscribe(Subscription::default()) + .await + .expect("subscribe should not have errored") + } } #[cfg(test)] @@ -895,19 +879,20 @@ mod test { } #[tokio::test] - async fn consumer_skips_evicted_groups() { + async fn subscriber_skips_evicted_groups() { tokio::time::pause(); let mut producer = Track::new("test").produce(); producer.append_group().unwrap(); // seq 0 - let mut consumer = producer.consume(); + let consumer = producer.consume(); + let mut subscriber = consumer.subscribe(Subscription::default()).await.unwrap(); tokio::time::advance(MAX_GROUP_AGE + Duration::from_secs(1)).await; producer.append_group().unwrap(); // seq 1 - // Group 0 was evicted. Consumer should get group 1. - let group = consumer.assert_group(); + // Group 0 was evicted. Subscriber should get group 1. + let group = subscriber.recv_group().now_or_never().unwrap().unwrap().unwrap(); assert_eq!(group.info.sequence, 1); } @@ -988,10 +973,11 @@ mod test { assert!(state.duplicates.contains(&2)); } - // Consumer should still be able to read through the hole. - let mut consumer = producer.consume(); - let group = consumer.assert_group(); - // consume() starts at index 0, first non-tombstoned group is seq 5. 
+ // Subscriber should still be able to read through the hole. + let consumer = producer.consume(); + let mut subscriber = consumer.subscribe(Subscription::default()).await.unwrap(); + let group = subscriber.recv_group().now_or_never().unwrap().unwrap().unwrap(); + // subscribe() starts at index 0, first non-tombstoned group is seq 5. assert_eq!(group.info.sequence, 5); } @@ -1040,10 +1026,21 @@ mod test { producer.create_group(Group { sequence: 1 }).unwrap(); producer.finish_at(1).unwrap(); - let mut consumer = producer.consume(); - assert_eq!(consumer.assert_group().info.sequence, 1); + let consumer = producer.consume(); + let mut subscriber = consumer.subscribe(Subscription::default()).await.unwrap(); + assert_eq!( + subscriber + .recv_group() + .now_or_never() + .expect("should not block") + .expect("would have errored") + .expect("track was closed") + .info + .sequence, + 1 + ); - let done = consumer + let done = subscriber .recv_group() .now_or_never() .expect("should not block") @@ -1117,4 +1114,87 @@ mod test { let result = producer.create_group(Group { sequence: 5 }); assert!(matches!(result, Err(Error::Duplicate))); } + + #[tokio::test] + async fn subscribe_unblocks_on_first_group() { + let mut producer = Track::new("test").produce(); + let consumer = producer.consume(); + + // subscribe blocks until a group exists. + let handle = tokio::spawn(async move { consumer.subscribe(Subscription::default()).await }); + + // Yield to let subscribe start waiting. + tokio::task::yield_now().await; + + // Write a group to unblock subscribe. 
+ producer.append_group().unwrap(); + + let subscriber = handle.await.unwrap().unwrap(); + assert_eq!(subscriber.latest(), Some(0)); + } + + #[tokio::test] + async fn subscribe_unblocks_on_finish() { + let mut producer = Track::new("test").produce(); + let consumer = producer.consume(); + + let handle = tokio::spawn(async move { consumer.subscribe(Subscription::default()).await }); + + tokio::task::yield_now().await; + + // Finishing without any groups should also unblock subscribe. + producer.finish().unwrap(); + + let subscriber = handle.await.unwrap().unwrap(); + assert!(subscriber.latest().is_none()); + } + + #[tokio::test] + async fn subscribe_unblocks_on_abort() { + let mut producer = Track::new("test").produce(); + let consumer = producer.consume(); + + let handle = tokio::spawn(async move { consumer.subscribe(Subscription::default()).await }); + + tokio::task::yield_now().await; + + // Aborting should unblock subscribe with an error. + producer.abort(Error::Cancel).unwrap(); + + let result = handle.await.unwrap(); + assert!(result.is_err(), "subscribe should return error on abort"); + } + + #[tokio::test] + async fn subscribe_returns_immediately_if_group_exists() { + let mut producer = Track::new("test").produce(); + producer.append_group().unwrap(); + + let consumer = producer.consume(); + + // Should not block since a group already exists. 
+ let subscriber = consumer + .subscribe(Subscription::default()) + .now_or_never() + .expect("should not block") + .expect("should not error"); + + assert_eq!(subscriber.latest(), Some(0)); + } + + #[tokio::test] + async fn subscribe_returns_immediately_if_finished() { + let mut producer = Track::new("test").produce(); + producer.finish().unwrap(); + + let consumer = producer.consume(); + + let subscriber = consumer + .subscribe(Subscription::default()) + .now_or_never() + .expect("should not block") + .expect("should not error"); + + assert!(subscriber.latest().is_none()); + } } diff --git a/rs/moq-mux/src/catalog.rs b/rs/moq-mux/src/catalog.rs index a0dbd1dc9..b51573ce0 100644 --- a/rs/moq-mux/src/catalog.rs +++ b/rs/moq-mux/src/catalog.rs @@ -54,8 +54,10 @@ impl CatalogProducer { } /// Create a consumer for this catalog, receiving updates as they're published. - pub fn consume(&self) -> hang::CatalogConsumer { - hang::CatalogConsumer::new(self.hang_track.consume()) + pub async fn consume(&self) -> Result { + let track = self.hang_track.consume(); + let subscriber = track.subscribe(moq_lite::Subscription::default()).await?; + Ok(hang::CatalogConsumer::new(subscriber)) } /// Finish publishing to this catalog. 
diff --git a/rs/moq-native/tests/backend.rs b/rs/moq-native/tests/backend.rs index 4ea43f401..239572443 100644 --- a/rs/moq-native/tests/backend.rs +++ b/rs/moq-native/tests/backend.rs @@ -69,9 +69,13 @@ async fn backend_test(scheme: &str, backend: moq_native::QuicBackend) { assert_eq!(path.as_str(), "test"); let bc = bc.expect("expected announce, got unannounce"); - let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let mut subscriber = tokio::time::timeout(TIMEOUT, track_sub.subscribe(Default::default())) + .await + .expect("subscribe timed out") + .expect("subscribe failed"); - let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.recv_group()) + let mut group_sub = tokio::time::timeout(TIMEOUT, subscriber.recv_group()) .await .expect("recv_group timed out") .expect("recv_group failed") @@ -219,9 +223,13 @@ async fn iroh_connect() { assert_eq!(path.as_str(), "test"); let bc = bc.expect("expected announce, got unannounce"); - let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let mut subscriber = tokio::time::timeout(TIMEOUT, track_sub.subscribe(Default::default())) + .await + .expect("subscribe timed out") + .expect("subscribe failed"); - let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.recv_group()) + let mut group_sub = tokio::time::timeout(TIMEOUT, subscriber.recv_group()) .await .expect("recv_group timed out") .expect("recv_group failed") diff --git a/rs/moq-native/tests/broadcast.rs b/rs/moq-native/tests/broadcast.rs index 481e18863..0c9f7f254 100644 --- a/rs/moq-native/tests/broadcast.rs +++ b/rs/moq-native/tests/broadcast.rs @@ -87,10 +87,14 @@ async fn broadcast_test(scheme: &str, client_version: Option<&str>, server_versi let bc = bc.expect("expected announce, got unannounce"); 
// Subscribe to the track. - let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let mut subscriber = tokio::time::timeout(TIMEOUT, track_sub.subscribe(Default::default())) + .await + .expect("subscribe timed out") + .expect("subscribe failed"); // Read one group. - let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.recv_group()) + let mut group_sub = tokio::time::timeout(TIMEOUT, subscriber.recv_group()) .await .expect("recv_group timed out") .expect("recv_group failed") @@ -425,10 +429,14 @@ async fn broadcast_websocket() { let bc = bc.expect("expected announce, got unannounce"); // Subscribe to the track. - let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let mut subscriber = tokio::time::timeout(TIMEOUT, track_sub.subscribe(Default::default())) + .await + .expect("subscribe timed out") + .expect("subscribe failed"); // Read one group. - let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.recv_group()) + let mut group_sub = tokio::time::timeout(TIMEOUT, subscriber.recv_group()) .await .expect("recv_group timed out") .expect("recv_group failed") @@ -530,9 +538,13 @@ async fn broadcast_websocket_fallback() { let bc = bc.expect("expected announce, got unannounce"); // Subscribe to the track. 
- let mut track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let track_sub = bc.consume_track(&Track::new("video")).expect("consume_track failed"); + let mut subscriber = tokio::time::timeout(TIMEOUT, track_sub.subscribe(Default::default())) + .await + .expect("subscribe timed out") + .expect("subscribe failed"); - let mut group_sub = tokio::time::timeout(TIMEOUT, track_sub.recv_group()) + let mut group_sub = tokio::time::timeout(TIMEOUT, subscriber.recv_group()) .await .expect("recv_group timed out") .expect("recv_group failed") diff --git a/rs/moq-relay/src/web.rs b/rs/moq-relay/src/web.rs index eb28a0228..f33eab548 100644 --- a/rs/moq-relay/src/web.rs +++ b/rs/moq-relay/src/web.rs @@ -281,7 +281,7 @@ async fn serve_fetch( // NOTE: The auth token is already scoped to the broadcast. let broadcast = origin.consume_broadcast("").ok_or(StatusCode::NOT_FOUND)?; - let mut track = broadcast.consume_track(&track).map_err(|err| match err { + let track = broadcast.consume_track(&track).map_err(|err| match err { moq_lite::Error::NotFound => StatusCode::NOT_FOUND, _ => StatusCode::INTERNAL_SERVER_ERROR, })?; @@ -289,19 +289,22 @@ async fn serve_fetch( let deadline = tokio::time::Instant::now() + tokio::time::Duration::from_secs(30); let result = tokio::time::timeout_at(deadline, async { + let to_err = |_: moq_lite::Error| StatusCode::INTERNAL_SERVER_ERROR; + let group = match params.group { FetchGroup::Latest => match track.latest() { - Some(sequence) => track.get_group(sequence).await, - None => track.recv_group().await, + Some(sequence) => track.get_group(sequence).await.map_err(to_err)?, + None => { + let mut sub = track + .subscribe(moq_lite::Subscription::default()) + .await + .map_err(to_err)?; + sub.recv_group().await.map_err(to_err)? 
+ } }, - FetchGroup::Num(sequence) => track.get_group(sequence).await, - }; - - let group = match group { - Ok(Some(group)) => group, - Ok(None) => return Err(StatusCode::NOT_FOUND), - Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR), - }; + FetchGroup::Num(sequence) => track.get_group(sequence).await.map_err(to_err)?, + } + .ok_or(StatusCode::NOT_FOUND)?; tracing::info!(track = %track.info.name, group = %group.info.sequence, "serving group"); From f4ce1f4ed00341c7b518f12666ddc5c380571db5 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 14:33:41 -0700 Subject: [PATCH 10/14] Split NotFound into UnknownBroadcast and UnknownTrack Deprecate the generic NotFound error in favor of more specific variants to make debugging easier. Each maps to its own wire code. Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/moq-lite/src/error.rs | 12 ++++++++++++ rs/moq-lite/src/ietf/adapter.rs | 2 +- rs/moq-lite/src/ietf/subscriber.rs | 4 ++-- rs/moq-lite/src/lite/publisher.rs | 2 +- rs/moq-lite/src/lite/subscriber.rs | 2 +- rs/moq-lite/src/model/broadcast.rs | 4 ++-- rs/moq-relay/src/web.rs | 2 +- 7 files changed, 20 insertions(+), 8 deletions(-) diff --git a/rs/moq-lite/src/error.rs b/rs/moq-lite/src/error.rs index 09d30d5ec..d4c08cab8 100644 --- a/rs/moq-lite/src/error.rs +++ b/rs/moq-lite/src/error.rs @@ -47,9 +47,16 @@ pub enum Error { #[error("app code={0}")] App(u16), + #[deprecated(note = "Use UnknownBroadcast or UnknownTrack instead")] #[error("not found")] NotFound, + #[error("unknown broadcast")] + UnknownBroadcast, + + #[error("unknown track")] + UnknownTrack, + #[error("wrong frame size")] WrongSize, @@ -84,6 +91,7 @@ pub enum Error { Closed, } +#[allow(deprecated)] impl Error { /// An integer code that is sent over the wire. 
pub fn to_code(&self) -> u32 { @@ -100,6 +108,8 @@ impl Error { Self::BoundsExceeded => 11, Self::Duplicate => 12, Self::NotFound => 13, + Self::UnknownBroadcast => 26, + Self::UnknownTrack => 27, Self::WrongSize => 14, Self::ProtocolViolation => 15, Self::UnexpectedMessage => 16, @@ -138,6 +148,8 @@ impl Error { 20 => Self::InvalidRole, 24 => Self::Dropped, 25 => Self::Closed, + 26 => Self::UnknownBroadcast, + 27 => Self::UnknownTrack, code if code >= 64 => match u16::try_from(code - 64) { Ok(app) => Self::App(app), Err(_) => Self::ProtocolViolation, diff --git a/rs/moq-lite/src/ietf/adapter.rs b/rs/moq-lite/src/ietf/adapter.rs index 64ba1a228..751568183 100644 --- a/rs/moq-lite/src/ietf/adapter.rs +++ b/rs/moq-lite/src/ietf/adapter.rs @@ -636,7 +636,7 @@ impl ControlStreamAdapter { .unwrap() .get(&ns) .copied() - .ok_or(Error::NotFound) + .ok_or(Error::UnknownBroadcast) } } diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index df3b47b8f..3712e9dc3 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ b/rs/moq-lite/src/ietf/subscriber.rs @@ -454,7 +454,7 @@ impl Subscriber { entry.remove(); } } - Entry::Vacant(_) => return Err(Error::NotFound), + Entry::Vacant(_) => return Err(Error::UnknownBroadcast), }; Ok(()) @@ -676,7 +676,7 @@ impl Subscriber { RequestId(group.track_alias) } }; - let track = state.subscribes.get_mut(&request_id).ok_or(Error::NotFound)?; + let track = state.subscribes.get_mut(&request_id).ok_or(Error::UnknownTrack)?; let group = Group { sequence: group.group_id, diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index 92d0a3881..dbcbe450c 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -262,7 +262,7 @@ impl Publisher { ) -> Result<(), Error> { let track = Track::new(subscribe.track.to_string()); - let broadcast = consumer.ok_or(Error::NotFound)?; + let broadcast = consumer.ok_or(Error::UnknownBroadcast)?; let track = 
broadcast.consume_track(&track)?; let subscriber = track.subscribe(Subscription::default()).await?; diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index 37becb5c2..365c5ec34 100644 --- a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -113,7 +113,7 @@ impl Subscriber { tracing::debug!(broadcast = %self.log_path(&path), "unannounced"); // Abort the producer. - let mut producer = producers.remove(&path.into_owned()).ok_or(Error::NotFound)?; + let mut producer = producers.remove(&path.into_owned()).ok_or(Error::UnknownBroadcast)?; producer.abort(Error::Cancel).ok(); } } diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 50cdc8a9c..6278d649c 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -132,7 +132,7 @@ impl BroadcastProducer { /// Remove a track from the lookup. pub fn remove_track(&mut self, name: &str) -> Result<(), Error> { let mut state = modify(&self.state)?; - state.tracks.remove(name).ok_or(Error::NotFound)?; + state.tracks.remove(name).ok_or(Error::UnknownTrack)?; Ok(()) } @@ -361,7 +361,7 @@ impl BroadcastConsumer { } if state.dynamic == 0 { - return Err(Error::NotFound); + return Err(Error::UnknownTrack); } // Create a new TrackProducer, insert into lookup, and queue for dynamic handler. diff --git a/rs/moq-relay/src/web.rs b/rs/moq-relay/src/web.rs index f33eab548..63f563332 100644 --- a/rs/moq-relay/src/web.rs +++ b/rs/moq-relay/src/web.rs @@ -282,7 +282,7 @@ async fn serve_fetch( // NOTE: The auth token is already scoped to the broadcast. 
let broadcast = origin.consume_broadcast("").ok_or(StatusCode::NOT_FOUND)?; let track = broadcast.consume_track(&track).map_err(|err| match err { - moq_lite::Error::NotFound => StatusCode::NOT_FOUND, + moq_lite::Error::UnknownTrack | moq_lite::Error::UnknownBroadcast => StatusCode::NOT_FOUND, _ => StatusCode::INTERNAL_SERVER_ERROR, })?; From e6ae394a5cc825937aecf355940c8f2a6eb776bf Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Thu, 19 Mar 2026 14:47:58 -0700 Subject: [PATCH 11/14] Remove spawn_track_cleanup, fix unknown track error The cleanup task was firing immediately (unused() resolved with zero consumers), removing tracks from the lookup before any subscriber could find them. This matches main's behavior: stale entries are cleaned up lazily by consume_track when it finds closed weak refs. Co-Authored-By: Claude Opus 4.6 (1M context) --- rs/moq-lite/src/model/broadcast.rs | 53 +++++------------------------- rs/moq-lite/src/model/track.rs | 11 ------- 2 files changed, 9 insertions(+), 55 deletions(-) diff --git a/rs/moq-lite/src/model/broadcast.rs b/rs/moq-lite/src/model/broadcast.rs index 6278d649c..c38b40d70 100644 --- a/rs/moq-lite/src/model/broadcast.rs +++ b/rs/moq-lite/src/model/broadcast.rs @@ -75,29 +75,6 @@ fn modify(state: &conducer::Producer) -> Result, } } -/// Spawn a cleanup task that removes the track from the lookup when all consumers are dropped. -fn spawn_track_cleanup(state: &conducer::Producer, track: &TrackProducer) { - let weak = track.weak(); - let consumer_state = state.consume(); - web_async::spawn(async move { - let _ = weak.unused().await; - - let Some(producer) = consumer_state.produce() else { - return; - }; - let Ok(mut state) = producer.write() else { - return; - }; - - // Remove the entry, but reinsert if it was replaced by a different reference. 
- if let Some(current) = state.tracks.remove(&weak.info.name) - && !current.is_clone(&weak) - { - state.tracks.insert(current.info.name.clone(), current); - } - }); -} - /// Manages tracks within a broadcast. /// /// Insert tracks statically with [Self::insert_track] / [Self::create_track], @@ -200,13 +177,10 @@ impl BroadcastProducer { } } -/// Insert a track into the broadcast lookup and spawn a cleanup task. +/// Insert a track into the broadcast lookup. fn insert_track_impl(state: &conducer::Producer, track: &TrackProducer) -> Result<(), Error> { let mut guard = modify(state)?; guard.insert_track(track)?; - drop(guard); - - spawn_track_cleanup(state, track); Ok(()) } @@ -368,10 +342,7 @@ impl BroadcastConsumer { let track_producer = TrackProducer::new(track.clone()); state.insert_track(&track_producer)?; let consumer = track_producer.consume(); - state.requested.push(track_producer.clone()); - drop(state); - - spawn_track_cleanup(&producer, &track_producer); + state.requested.push(track_producer); Ok(consumer) } @@ -511,15 +482,10 @@ mod test { // The consumer should see the track as closed. track1_consumer.assert_closed(); - - // Advance time to let cleanup task run. - tokio::time::advance(std::time::Duration::from_millis(1)).await; - let track1_consumer_clone = consumer.assert_consume_track(&Track::new("track1")); drop(track1_consumer); - drop(track1_consumer_clone); - tokio::time::advance(std::time::Duration::from_millis(1)).await; - // Subscribe again to the same track -- should get a new request. + // Subscribe again to the same track -- should get a new request + // because the old producer was dropped (weak ref is closed). 
let track2_consumer = consumer.assert_consume_track(&Track::new("track1")); let mut producer2 = dynamic.assert_request(); @@ -530,7 +496,6 @@ mod test { #[tokio::test] async fn requested_unused() { - tokio::time::pause(); let broadcast = Broadcast::new().produce(); let mut dynamic = broadcast.dynamic(); @@ -564,15 +529,15 @@ mod test { "track producer should be unused after consumer is dropped" ); - // Advance paused time to let the async cleanup task run. - tokio::time::advance(std::time::Duration::from_millis(1)).await; + // Drop the producer so the weak ref is closed. + drop(producer1); - // Now we can subscribe again. + // Now consume_track finds the stale entry, removes it, and creates a new request. let c2 = broadcast.consume(); let _consumer3 = c2.assert_consume_track(&Track::new("unknown_track")); - let producer2 = dynamic.assert_request(); - assert!(!producer2.is_clone(&producer1)); + let _producer2 = dynamic.assert_request(); + dynamic.assert_no_request(); } #[tokio::test] diff --git a/rs/moq-lite/src/model/track.rs b/rs/moq-lite/src/model/track.rs index a7119b72d..c1ac86130 100644 --- a/rs/moq-lite/src/model/track.rs +++ b/rs/moq-lite/src/model/track.rs @@ -534,17 +534,6 @@ impl TrackWeak { state: self.state.consume(), } } - - pub async fn unused(&self) -> crate::Result<()> { - self.state - .unused() - .await - .map_err(|r| r.abort.clone().unwrap_or(Error::Dropped)) - } - - pub fn is_clone(&self, other: &Self) -> bool { - self.state.same_channel(&other.state) - } } /// Iterates groups from a track while managing this subscriber's subscription lifecycle. From 73fd42bc971044f5e59736d73b1a6ce7177021f7 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Sat, 21 Mar 2026 09:42:38 -0700 Subject: [PATCH 12/14] Add dual MSF + Hang catalog support in @moq/watch Create @moq/msf package with Zod schemas for MSF catalog format, and update the watch broadcast to subscribe to both catalog formats in parallel. Hang gets a 100ms headstart; first catalog to arrive wins. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- bun.lock | 17 ++++++++- js/msf/package.json | 24 +++++++++++++ js/msf/src/catalog.ts | 61 +++++++++++++++++++++++++++++++ js/msf/src/index.ts | 1 + js/msf/tsconfig.json | 8 +++++ js/watch/package.json | 1 + js/watch/src/broadcast.ts | 73 ++++++++++++++++++++++++++++++++----- js/watch/src/msf.ts | 75 +++++++++++++++++++++++++++++++++++++++ package.json | 1 + 9 files changed, 252 insertions(+), 9 deletions(-) create mode 100644 js/msf/package.json create mode 100644 js/msf/src/catalog.ts create mode 100644 js/msf/src/index.ts create mode 100644 js/msf/tsconfig.json create mode 100644 js/watch/src/msf.ts diff --git a/bun.lock b/bun.lock index ac7c11629..3bb663d45 100644 --- a/bun.lock +++ b/bun.lock @@ -77,7 +77,7 @@ }, "js/lite": { "name": "@moq/lite", - "version": "0.1.6", + "version": "0.1.7", "dependencies": { "@moq/qmux": "^0.0.6", "@moq/signals": "workspace:*", @@ -94,6 +94,18 @@ "zod": "^4.1.0", }, }, + "js/msf": { + "name": "@moq/msf", + "version": "0.1.0", + "dependencies": { + "@moq/lite": "workspace:^", + "zod": "^4.1.5", + }, + "devDependencies": { + "rimraf": "^6.0.1", + "typescript": "^5.9.2", + }, + }, "js/publish": { "name": "@moq/publish", "version": "0.2.3", @@ -173,6 +185,7 @@ "dependencies": { "@moq/hang": "workspace:^", "@moq/lite": "workspace:^", + "@moq/msf": "workspace:^", "@moq/signals": "workspace:^", "@moq/ui-core": "workspace:^", }, @@ -449,6 +462,8 @@ "@moq/lite": ["@moq/lite@workspace:js/lite"], + "@moq/msf": ["@moq/msf@workspace:js/msf"], + "@moq/publish": ["@moq/publish@workspace:js/publish"], "@moq/qmux": ["@moq/qmux@0.0.6", "", {}, "sha512-ISuGz05lUvf1hzHW3Aw3VnsGRJe1w9Qdog3LQ66KS+l+5mzQsPANvW8yOioEe1Z9dJO2G3sAHoGPnzwnsY9SIQ=="], diff --git a/js/msf/package.json b/js/msf/package.json new file mode 100644 index 000000000..843e1d54d --- /dev/null +++ b/js/msf/package.json @@ -0,0 +1,24 @@ +{ + "name": "@moq/msf", + "type": "module", + "version": "0.1.0", + "description": "MSF (MOQT 
Streaming Format) catalog types for MoQ", + "license": "(MIT OR Apache-2.0)", + "repository": "github:moq-dev/moq", + "exports": { + ".": "./src/index.ts" + }, + "scripts": { + "build": "rimraf dist && tsc -b && bun ../common/package.ts", + "check": "tsc --noEmit", + "release": "bun ../common/release.ts" + }, + "dependencies": { + "@moq/lite": "workspace:^", + "zod": "^4.1.5" + }, + "devDependencies": { + "rimraf": "^6.0.1", + "typescript": "^5.9.2" + } +} diff --git a/js/msf/src/catalog.ts b/js/msf/src/catalog.ts new file mode 100644 index 000000000..7ee67368b --- /dev/null +++ b/js/msf/src/catalog.ts @@ -0,0 +1,61 @@ +import type * as Moq from "@moq/lite"; +import { z } from "zod"; + +export const PackagingSchema = z.enum(["loc", "cmaf", "legacy", "mediatimeline", "eventtimeline"]).or(z.string()); + +export type Packaging = z.infer; + +export const RoleSchema = z + .enum(["video", "audio", "audiodescription", "caption", "subtitle", "signlanguage"]) + .or(z.string()); + +export type Role = z.infer; + +export const TrackSchema = z.object({ + name: z.string(), + packaging: PackagingSchema, + isLive: z.boolean(), + role: RoleSchema.optional(), + codec: z.string().optional(), + width: z.number().optional(), + height: z.number().optional(), + framerate: z.number().optional(), + samplerate: z.number().optional(), + channelConfig: z.string().optional(), + bitrate: z.number().optional(), + initData: z.string().optional(), + renderGroup: z.number().optional(), + altGroup: z.number().optional(), +}); + +export type Track = z.infer; + +export const CatalogSchema = z.object({ + version: z.literal(1), + tracks: z.array(TrackSchema), +}); + +export type Catalog = z.infer; + +export function encode(catalog: Catalog): Uint8Array { + const encoder = new TextEncoder(); + return encoder.encode(JSON.stringify(catalog)); +} + +export function decode(raw: Uint8Array): Catalog { + const decoder = new TextDecoder(); + const str = decoder.decode(raw); + try { + const json = 
JSON.parse(str); + return CatalogSchema.parse(json); + } catch (error) { + console.warn("invalid MSF catalog", str); + throw error; + } +} + +export async function fetch(track: Moq.Track): Promise { + const frame = await track.readFrame(); + if (!frame) return undefined; + return decode(frame); +} diff --git a/js/msf/src/index.ts b/js/msf/src/index.ts new file mode 100644 index 000000000..669ac6719 --- /dev/null +++ b/js/msf/src/index.ts @@ -0,0 +1 @@ +export * from "./catalog"; diff --git a/js/msf/tsconfig.json b/js/msf/tsconfig.json new file mode 100644 index 000000000..0f506334d --- /dev/null +++ b/js/msf/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "./src" + }, + "include": ["src"] +} diff --git a/js/watch/package.json b/js/watch/package.json index 9b032beb1..d04843488 100644 --- a/js/watch/package.json +++ b/js/watch/package.json @@ -26,6 +26,7 @@ "dependencies": { "@moq/hang": "workspace:^", "@moq/lite": "workspace:^", + "@moq/msf": "workspace:^", "@moq/signals": "workspace:^", "@moq/ui-core": "workspace:^" }, diff --git a/js/watch/src/broadcast.ts b/js/watch/src/broadcast.ts index 5ebc9181c..2f99a52a1 100644 --- a/js/watch/src/broadcast.ts +++ b/js/watch/src/broadcast.ts @@ -1,8 +1,13 @@ import * as Catalog from "@moq/hang/catalog"; import type * as Moq from "@moq/lite"; import { Path } from "@moq/lite"; +import * as Msf from "@moq/msf"; import { Effect, type Getter, Signal } from "@moq/signals"; +import { toHang } from "./msf"; + +export type CatalogFormat = "hang" | "msf"; + export interface BroadcastProps { connection?: Moq.Connection.Established | Signal; @@ -19,6 +24,9 @@ export interface BroadcastProps { // Whether to reload the broadcast when it goes offline. // Defaults to false; pass true to wait for an announcement before subscribing. reload?: boolean | Signal; + + // Which catalog formats to try. 
Default: ["hang", "msf"] + catalog?: CatalogFormat[] | Signal; } // A catalog source that (optionally) reloads automatically when live/offline. @@ -30,6 +38,8 @@ export class Broadcast { status = new Signal<"offline" | "loading" | "live">("offline"); reload: Signal; + catalogFormats: Signal; + #active = new Signal(undefined); readonly active: Getter = this.#active; @@ -46,6 +56,7 @@ export class Broadcast { this.name = Signal.from(props?.name ?? Path.empty()); this.enabled = Signal.from(props?.enabled ?? false); this.reload = Signal.from(props?.reload ?? false); + this.catalogFormats = Signal.from(props?.catalog ?? (["hang", "msf"] as CatalogFormat[])); this.#announced = props?.announced ?? new Signal(new Set()); @@ -84,21 +95,67 @@ export class Broadcast { if (!values) return; const [_, broadcast] = values; + const formats = effect.get(this.catalogFormats); this.status.set("loading"); - const catalog = broadcast.subscribe("catalog.json", Catalog.PRIORITY.catalog); - effect.cleanup(() => catalog.close()); + const hangTrack = formats.includes("hang") + ? broadcast.subscribe("catalog.json", Catalog.PRIORITY.catalog) + : undefined; + const msfTrack = formats.includes("msf") ? broadcast.subscribe("catalog", Catalog.PRIORITY.catalog) : undefined; + + if (hangTrack) effect.cleanup(() => hangTrack.close()); + if (msfTrack) effect.cleanup(() => msfTrack.close()); effect.spawn(async () => { try { - for (;;) { - const update = await Promise.race([effect.cancel, Catalog.fetch(catalog)]); - if (!update) break; + // Race the first catalog fetch, giving hang a 100ms headstart + const hangFetch = hangTrack + ? Catalog.fetch(hangTrack).then((r) => (r ? { kind: "hang" as const, root: r } : undefined)) + : undefined; + + const msfFetch = msfTrack + ? new Promise((r) => setTimeout(r, 100)) + .then(() => Msf.fetch(msfTrack)) + .then((c) => (c ? 
{ kind: "msf" as const, root: toHang(c) } : undefined)) + : undefined; + + const candidates = [effect.cancel, hangFetch, msfFetch].filter( + (c): c is NonNullable => c != null, + ); + const first = await Promise.race(candidates); + if (!first) return; + + // Close the loser + if (first.kind === "hang") { + msfTrack?.close(); + } else { + hangTrack?.close(); + } - console.debug("received catalog", this.name.peek(), update); + console.debug("received catalog", first.kind, this.name.peek(), first.root); + this.#catalog.set(first.root); + this.status.set("live"); + + // Continue reading updates from the winner + const fetchNext = + first.kind === "hang" + ? async () => { + const update = await Promise.race([ + effect.cancel, + Catalog.fetch(hangTrack as Moq.Track), + ]); + return update ?? undefined; + } + : async () => { + const update = await Promise.race([effect.cancel, Msf.fetch(msfTrack as Moq.Track)]); + return update ? toHang(update) : undefined; + }; - this.#catalog.set(update); - this.status.set("live"); + for (;;) { + const root = await fetchNext(); + if (!root) break; + console.debug("received catalog", first.kind, this.name.peek(), root); + this.#catalog.set(root); } } catch (err) { console.warn("error fetching catalog", this.name.peek(), err); diff --git a/js/watch/src/msf.ts b/js/watch/src/msf.ts new file mode 100644 index 000000000..c86953b3f --- /dev/null +++ b/js/watch/src/msf.ts @@ -0,0 +1,75 @@ +import type * as Catalog from "@moq/hang/catalog"; +import { u53 } from "@moq/hang/catalog"; +import type * as Msf from "@moq/msf"; + +// Convert base64 string to hex string +function base64ToHex(b64: string): string { + const raw = atob(b64); + let hex = ""; + for (let i = 0; i < raw.length; i++) { + hex += raw.charCodeAt(i).toString(16).padStart(2, "0"); + } + return hex; +} + +function toContainer(track: Msf.Track): Catalog.Container { + if (track.packaging === "cmaf" && track.initData) { + return { kind: "cmaf", initData: track.initData }; + } + return { 
kind: "legacy" }; +} + +function toVideoConfig(track: Msf.Track): Catalog.VideoConfig | undefined { + if (!track.codec) return undefined; + + return { + codec: track.codec, + container: toContainer(track), + description: track.packaging !== "cmaf" && track.initData ? base64ToHex(track.initData) : undefined, + codedWidth: track.width != null ? u53(track.width) : undefined, + codedHeight: track.height != null ? u53(track.height) : undefined, + framerate: track.framerate, + bitrate: track.bitrate != null ? u53(track.bitrate) : undefined, + }; +} + +function toAudioConfig(track: Msf.Track): Catalog.AudioConfig | undefined { + if (!track.codec) return undefined; + + return { + codec: track.codec, + container: toContainer(track), + description: track.packaging !== "cmaf" && track.initData ? base64ToHex(track.initData) : undefined, + sampleRate: u53(track.samplerate ?? 48000), + numberOfChannels: u53(track.channelConfig ? Number.parseInt(track.channelConfig, 10) : 2), + bitrate: track.bitrate != null ? u53(track.bitrate) : undefined, + }; +} + +/** Convert an MSF catalog to a hang catalog Root. 
*/ +export function toHang(msf: Msf.Catalog): Catalog.Root { + const videoRenditions: Record = {}; + const audioRenditions: Record = {}; + + for (const track of msf.tracks) { + if (track.role === "video") { + const config = toVideoConfig(track); + if (config) videoRenditions[track.name] = config; + } else if (track.role === "audio") { + const config = toAudioConfig(track); + if (config) audioRenditions[track.name] = config; + } + } + + const root: Catalog.Root = {}; + + if (Object.keys(videoRenditions).length > 0) { + root.video = { renditions: videoRenditions }; + } + + if (Object.keys(audioRenditions).length > 0) { + root.audio = { renditions: audioRenditions }; + } + + return root; +} diff --git a/package.json b/package.json index d08b306c8..039e35e29 100644 --- a/package.json +++ b/package.json @@ -12,6 +12,7 @@ "js/clock", "js/token", "js/hang", + "js/msf", "js/ui-core", "js/watch", "js/publish", From 107045c4c899f48fd2bc2d9ea8387e0ce16314e4 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Tue, 24 Mar 2026 12:35:34 -0700 Subject: [PATCH 13/14] Fix catalog race bug, propagate subscribe priority, and avoid guard across awaits - broadcast.ts: Use Promise.any so a failed hang fetch no longer short-circuits MSF - lite/subscriber.rs: Read subscription params from consumer instead of hardcoding priority: 0 - fmp4.rs: Restructure to hold CatalogGuard only during catalog mutation, not across awaits - Minor: extract magic numbers, harden base64ToHex, remove redundant alias/nullish coalescing Co-Authored-By: Claude Opus 4.6 (1M context) --- js/watch/src/broadcast.ts | 29 ++++-- js/watch/src/msf.ts | 27 +++-- rs/moq-lite/src/ietf/subscriber.rs | 3 +- rs/moq-lite/src/lite/subscriber.rs | 17 +-- rs/moq-mux/src/convert/fmp4.rs | 159 ++++++++++++++++++++--------- 5 files changed, 160 insertions(+), 75 deletions(-) diff --git a/js/watch/src/broadcast.ts b/js/watch/src/broadcast.ts index 2f99a52a1..0300adef9 100644 --- a/js/watch/src/broadcast.ts +++ b/js/watch/src/broadcast.ts 
@@ -8,6 +8,9 @@ import { toHang } from "./msf"; export type CatalogFormat = "hang" | "msf"; +/** Delay (ms) before attempting MSF catalog fetch, giving hang format a headstart. */ +const HANG_HEADSTART_MS = 100; + export interface BroadcastProps { connection?: Moq.Connection.Established | Signal; @@ -108,21 +111,29 @@ export class Broadcast { effect.spawn(async () => { try { - // Race the first catalog fetch, giving hang a 100ms headstart + // Race the first catalog fetch, giving hang a headstart. + // Wrap each fetch so undefined results reject, ensuring only + // successful fetches compete (via Promise.any). const hangFetch = hangTrack - ? Catalog.fetch(hangTrack).then((r) => (r ? { kind: "hang" as const, root: r } : undefined)) + ? Catalog.fetch(hangTrack).then((r) => { + if (r) return { kind: "hang" as const, root: r }; + throw new Error("hang catalog empty"); + }) : undefined; const msfFetch = msfTrack - ? new Promise((r) => setTimeout(r, 100)) + ? new Promise((r) => setTimeout(r, HANG_HEADSTART_MS)) .then(() => Msf.fetch(msfTrack)) - .then((c) => (c ? { kind: "msf" as const, root: toHang(c) } : undefined)) + .then((c) => { + if (c) return { kind: "msf" as const, root: toHang(c) }; + throw new Error("msf catalog empty"); + }) : undefined; - const candidates = [effect.cancel, hangFetch, msfFetch].filter( - (c): c is NonNullable => c != null, - ); - const first = await Promise.race(candidates); + const candidates = [hangFetch, msfFetch].filter((c): c is NonNullable => c != null); + if (candidates.length === 0) return; + + const first = await Promise.race([effect.cancel.then(() => undefined), Promise.any(candidates)]); if (!first) return; // Close the loser @@ -144,7 +155,7 @@ export class Broadcast { effect.cancel, Catalog.fetch(hangTrack as Moq.Track), ]); - return update ?? 
undefined; + return update; } : async () => { const update = await Promise.race([effect.cancel, Msf.fetch(msfTrack as Moq.Track)]); diff --git a/js/watch/src/msf.ts b/js/watch/src/msf.ts index c86953b3f..ed61e637a 100644 --- a/js/watch/src/msf.ts +++ b/js/watch/src/msf.ts @@ -2,14 +2,21 @@ import type * as Catalog from "@moq/hang/catalog"; import { u53 } from "@moq/hang/catalog"; import type * as Msf from "@moq/msf"; -// Convert base64 string to hex string -function base64ToHex(b64: string): string { - const raw = atob(b64); - let hex = ""; - for (let i = 0; i < raw.length; i++) { - hex += raw.charCodeAt(i).toString(16).padStart(2, "0"); +const DEFAULT_SAMPLE_RATE = 48000; +const DEFAULT_NUMBER_OF_CHANNELS = 2; + +// Convert base64 string to hex string, returning undefined on invalid input. +function base64ToHex(b64: string): string | undefined { + try { + const raw = atob(b64); + let hex = ""; + for (let i = 0; i < raw.length; i++) { + hex += raw.charCodeAt(i).toString(16).padStart(2, "0"); + } + return hex; + } catch { + return undefined; } - return hex; } function toContainer(track: Msf.Track): Catalog.Container { @@ -40,8 +47,10 @@ function toAudioConfig(track: Msf.Track): Catalog.AudioConfig | undefined { codec: track.codec, container: toContainer(track), description: track.packaging !== "cmaf" && track.initData ? base64ToHex(track.initData) : undefined, - sampleRate: u53(track.samplerate ?? 48000), - numberOfChannels: u53(track.channelConfig ? Number.parseInt(track.channelConfig, 10) : 2), + sampleRate: u53(track.samplerate ?? DEFAULT_SAMPLE_RATE), + numberOfChannels: u53( + track.channelConfig ? Number.parseInt(track.channelConfig, 10) : DEFAULT_NUMBER_OF_CHANNELS, + ), bitrate: track.bitrate != null ? 
u53(track.bitrate) : undefined, }; } diff --git a/rs/moq-lite/src/ietf/subscriber.rs b/rs/moq-lite/src/ietf/subscriber.rs index cb312fa08..f1da84a1a 100644 --- a/rs/moq-lite/src/ietf/subscriber.rs +++ b/rs/moq-lite/src/ietf/subscriber.rs @@ -518,8 +518,7 @@ impl Subscriber { Ok(()) } - async fn run_subscribe(&mut self, broadcast_path: Path<'_>, mut track: TrackProducer) { - let broadcast = broadcast_path; + async fn run_subscribe(&mut self, broadcast: Path<'_>, mut track: TrackProducer) { let request_id = match self.control.next_request_id().await { Ok(id) => id, Err(err) => { diff --git a/rs/moq-lite/src/lite/subscriber.rs b/rs/moq-lite/src/lite/subscriber.rs index c649dc4a7..250dedbeb 100644 --- a/rs/moq-lite/src/lite/subscriber.rs +++ b/rs/moq-lite/src/lite/subscriber.rs @@ -1,7 +1,6 @@ use std::{ collections::{HashMap, hash_map::Entry}, sync::{Arc, atomic}, - time::Duration, }; use crate::{ @@ -187,15 +186,21 @@ impl Subscriber { self.subscribes.lock().insert(id, track.clone()); + // Read the initial subscription parameters from the consumer. + let sub = match track.subscription().await { + Some(sub) => sub, + None => return, // Consumer already dropped. 
+ }; + let msg = lite::Subscribe { id, broadcast: broadcast_path.to_owned(), track: (&track.info.name).into(), - priority: 0, - ordered: false, - max_latency: Duration::ZERO, - start_group: None, - end_group: None, + priority: sub.priority, + ordered: sub.ordered, + max_latency: sub.max_latency, + start_group: sub.start, + end_group: sub.end, }; tracing::info!(id, broadcast = %self.log_path(&broadcast_path), track = %track_name, "subscribe started"); diff --git a/rs/moq-mux/src/convert/fmp4.rs b/rs/moq-mux/src/convert/fmp4.rs index 003e533d7..d4648fc91 100644 --- a/rs/moq-mux/src/convert/fmp4.rs +++ b/rs/moq-mux/src/convert/fmp4.rs @@ -35,80 +35,143 @@ impl Fmp4 { let catalog = catalog_consumer.next().await?.context("empty catalog")?; let mut output_catalog = catalog_producer.clone(); - let mut guard = output_catalog.lock(); let mut tasks = tokio::task::JoinSet::new(); + // Collect track subscriptions and output tracks first (async), + // then update the catalog guard only for the minimal mutation window. + struct VideoTask { + name: String, + config: VideoConfig, + input: moq_lite::TrackSubscriber, + output: moq_lite::TrackProducer, + convert: Option<(Vec, u64)>, // (init_data, timescale) for Legacy conversion + } + struct AudioTask { + name: String, + config: AudioConfig, + input: moq_lite::TrackSubscriber, + output: moq_lite::TrackProducer, + convert: Option<(Vec, u64)>, // (init_data, timescale) for Legacy conversion + } + + let mut video_tasks = Vec::new(); for (name, config) in &catalog.video.renditions { let input_track = self .input .subscribe_track(&moq_lite::Track::new(name.clone()), moq_lite::Subscription::default()) .await?; + let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; - match &config.container { - Container::Cmaf { .. 
} => { - guard.video.renditions.insert(name.clone(), config.clone()); - let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; - let track_name = name.clone(); - tasks.spawn(async move { - if let Err(e) = passthrough_track(input_track, output_track).await { - tracing::error!(%e, track = %track_name, "passthrough_track failed"); - } - }); - } + let convert = match &config.container { + Container::Cmaf { .. } => None, Container::Legacy => { let init_data = build_video_init(config)?; let timescale = guess_video_timescale(config); + Some((init_data, timescale)) + } + }; - let mut cmaf_config = config.clone(); - cmaf_config.container = Container::Cmaf { - init_data: base64::engine::general_purpose::STANDARD.encode(&init_data), - }; - guard.video.renditions.insert(name.clone(), cmaf_config); + video_tasks.push(VideoTask { + name: name.clone(), + config: config.clone(), + input: input_track, + output: output_track, + convert, + }); + } - let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; + let mut audio_tasks = Vec::new(); + for (name, config) in &catalog.audio.renditions { + let input_track = self + .input + .subscribe_track(&moq_lite::Track::new(name.clone()), moq_lite::Subscription::default()) + .await?; + let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; + + let convert = match &config.container { + Container::Cmaf { .. } => None, + Container::Legacy => { + let init_data = build_audio_init(config)?; + let timescale = config.sample_rate as u64; + Some((init_data, timescale)) + } + }; - let track_name = name.clone(); + audio_tasks.push(AudioTask { + name: name.clone(), + config: config.clone(), + input: input_track, + output: output_track, + convert, + }); + } + + // Now acquire the guard only for catalog mutations (no awaits). 
+ { + let mut guard = output_catalog.lock(); + for task in &video_tasks { + match &task.convert { + None => { + guard.video.renditions.insert(task.name.clone(), task.config.clone()); + } + Some((init_data, _)) => { + let mut cmaf_config = task.config.clone(); + cmaf_config.container = Container::Cmaf { + init_data: base64::engine::general_purpose::STANDARD.encode(init_data), + }; + guard.video.renditions.insert(task.name.clone(), cmaf_config); + } + } + } + for task in &audio_tasks { + match &task.convert { + None => { + guard.audio.renditions.insert(task.name.clone(), task.config.clone()); + } + Some((init_data, _)) => { + let mut cmaf_config = task.config.clone(); + cmaf_config.container = Container::Cmaf { + init_data: base64::engine::general_purpose::STANDARD.encode(init_data), + }; + guard.audio.renditions.insert(task.name.clone(), cmaf_config); + } + } + } + } + + // Spawn track conversion/passthrough tasks. + for task in video_tasks { + let track_name = task.name; + match task.convert { + None => { + tasks.spawn(async move { + if let Err(e) = passthrough_track(task.input, task.output).await { + tracing::error!(%e, track = %track_name, "passthrough_track failed"); + } + }); + } + Some((_, timescale)) => { tasks.spawn(async move { - if let Err(e) = convert_legacy_to_cmaf(input_track, output_track, timescale, true).await { + if let Err(e) = convert_legacy_to_cmaf(task.input, task.output, timescale, true).await { tracing::error!(%e, track = %track_name, "convert_legacy_to_cmaf failed"); } }); } } } - - for (name, config) in &catalog.audio.renditions { - let input_track = self - .input - .subscribe_track(&moq_lite::Track::new(name.clone()), moq_lite::Subscription::default()) - .await?; - - match &config.container { - Container::Cmaf { .. 
} => { - guard.audio.renditions.insert(name.clone(), config.clone()); - let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; - let track_name = name.clone(); + for task in audio_tasks { + let track_name = task.name; + match task.convert { + None => { tasks.spawn(async move { - if let Err(e) = passthrough_track(input_track, output_track).await { + if let Err(e) = passthrough_track(task.input, task.output).await { tracing::error!(%e, track = %track_name, "passthrough_track failed"); } }); } - Container::Legacy => { - let init_data = build_audio_init(config)?; - - let mut cmaf_config = config.clone(); - cmaf_config.container = Container::Cmaf { - init_data: base64::engine::general_purpose::STANDARD.encode(&init_data), - }; - guard.audio.renditions.insert(name.clone(), cmaf_config); - - let output_track = broadcast.create_track(moq_lite::Track::new(name.clone()))?; - - let timescale = config.sample_rate as u64; - let track_name = name.clone(); + Some((_, timescale)) => { tasks.spawn(async move { - if let Err(e) = convert_legacy_to_cmaf(input_track, output_track, timescale, false).await { + if let Err(e) = convert_legacy_to_cmaf(task.input, task.output, timescale, false).await { tracing::error!(%e, track = %track_name, "convert_legacy_to_cmaf failed"); } }); @@ -116,8 +179,6 @@ impl Fmp4 { } } - drop(guard); - // Keep broadcast and catalog alive until all track tasks complete. 
while tasks.join_next().await.is_some() {} From d8cf7330ef3056a9d555737f5f7267df36080ed6 Mon Sep 17 00:00:00 2001 From: Luke Curley Date: Wed, 1 Apr 2026 13:38:18 -0700 Subject: [PATCH 14/14] Add catalog format attribute to moq-watch and fix review findings - Add `catalog` attribute to accepting "hang", "msf", or "auto" with zod validation; default changed from racing both to "hang" only - Guard channelConfig parsing in MSF catalog conversion to handle NaN - Propagate subscriber's start_group/end_group into Subscription Co-Authored-By: Claude Opus 4.6 (1M context) --- bun.lock | 1 + js/watch/package.json | 3 ++- js/watch/src/broadcast.ts | 11 ++++++++--- js/watch/src/element.ts | 25 +++++++++++++++++++++++-- js/watch/src/msf.ts | 6 +++++- rs/moq-lite/src/lite/publisher.rs | 8 +++++++- 6 files changed, 46 insertions(+), 8 deletions(-) diff --git a/bun.lock b/bun.lock index 3bb663d45..b4121e26a 100644 --- a/bun.lock +++ b/bun.lock @@ -188,6 +188,7 @@ "@moq/msf": "workspace:^", "@moq/signals": "workspace:^", "@moq/ui-core": "workspace:^", + "zod": "^4.1.5", }, "devDependencies": { "@types/audioworklet": "^0.0.77", diff --git a/js/watch/package.json b/js/watch/package.json index d04843488..637355c6b 100644 --- a/js/watch/package.json +++ b/js/watch/package.json @@ -28,7 +28,8 @@ "@moq/lite": "workspace:^", "@moq/msf": "workspace:^", "@moq/signals": "workspace:^", - "@moq/ui-core": "workspace:^" + "@moq/ui-core": "workspace:^", + "zod": "^4.1.5" }, "devDependencies": { "@types/audioworklet": "^0.0.77", diff --git a/js/watch/src/broadcast.ts b/js/watch/src/broadcast.ts index 0300adef9..c607cb6bb 100644 --- a/js/watch/src/broadcast.ts +++ b/js/watch/src/broadcast.ts @@ -3,10 +3,15 @@ import type * as Moq from "@moq/lite"; import { Path } from "@moq/lite"; import * as Msf from "@moq/msf"; import { Effect, type Getter, Signal } from "@moq/signals"; +import { z } from "zod"; import { toHang } from "./msf"; -export type CatalogFormat = "hang" | "msf"; +export const 
catalogFormatSchema = z.enum(["hang", "msf"]); +export type CatalogFormat = z.infer; + +export const catalogAttrSchema = z.enum(["hang", "msf", "auto"]); +export type CatalogAttr = z.infer; /** Delay (ms) before attempting MSF catalog fetch, giving hang format a headstart. */ const HANG_HEADSTART_MS = 100; @@ -28,7 +33,7 @@ export interface BroadcastProps { // Defaults to false; pass true to wait for an announcement before subscribing. reload?: boolean | Signal; - // Which catalog formats to try. Default: ["hang", "msf"] + // Which catalog formats to try. Default: ["hang"] catalog?: CatalogFormat[] | Signal; } @@ -59,7 +64,7 @@ export class Broadcast { this.name = Signal.from(props?.name ?? Path.empty()); this.enabled = Signal.from(props?.enabled ?? false); this.reload = Signal.from(props?.reload ?? false); - this.catalogFormats = Signal.from(props?.catalog ?? (["hang", "msf"] as CatalogFormat[])); + this.catalogFormats = Signal.from(props?.catalog ?? (["hang"] as CatalogFormat[])); this.#announced = props?.announced ?? 
new Signal(new Set()); diff --git a/js/watch/src/element.ts b/js/watch/src/element.ts index 6302083dd..92f2971ae 100644 --- a/js/watch/src/element.ts +++ b/js/watch/src/element.ts @@ -2,10 +2,17 @@ import type { Time } from "@moq/lite"; import * as Moq from "@moq/lite"; import { Effect, Signal } from "@moq/signals"; import { MultiBackend } from "./backend"; -import { Broadcast } from "./broadcast"; +import { Broadcast, type CatalogAttr, type CatalogFormat, catalogAttrSchema } from "./broadcast"; import { Sync } from "./sync"; -const OBSERVED = ["url", "name", "paused", "volume", "muted", "reload", "jitter"] as const; +function parseCatalogAttr(value: string | null): CatalogFormat[] { + const parsed = catalogAttrSchema.safeParse(value); + if (!parsed.success) return ["hang"]; + if (parsed.data === "auto") return ["hang", "msf"]; + return [parsed.data]; +} + +const OBSERVED = ["url", "name", "paused", "volume", "muted", "reload", "jitter", "catalog"] as const; type Observed = (typeof OBSERVED)[number]; // Close everything when this element is garbage collected. @@ -152,6 +159,8 @@ export default class MoqWatch extends HTMLElement { this.broadcast.reload.set(newValue !== null); } else if (name === "jitter") { this.backend.jitter.set((newValue ? 
Number.parseFloat(newValue) : 100) as Time.Milli); + } else if (name === "catalog") { + this.broadcast.catalogFormats.set(parseCatalogAttr(newValue)); } else { const exhaustive: never = name; throw new Error(`Invalid attribute: ${exhaustive}`); @@ -213,6 +222,18 @@ export default class MoqWatch extends HTMLElement { set jitter(value: number) { this.backend.jitter.set(value as Time.Milli); } + + get catalog(): CatalogFormat[] { + return this.broadcast.catalogFormats.peek(); + } + + set catalog(value: CatalogAttr | CatalogFormat[]) { + if (typeof value === "string") { + this.broadcast.catalogFormats.set(parseCatalogAttr(value)); + } else { + this.broadcast.catalogFormats.set(value); + } + } } customElements.define("moq-watch", MoqWatch); diff --git a/js/watch/src/msf.ts b/js/watch/src/msf.ts index ed61e637a..37e864671 100644 --- a/js/watch/src/msf.ts +++ b/js/watch/src/msf.ts @@ -49,7 +49,11 @@ function toAudioConfig(track: Msf.Track): Catalog.AudioConfig | undefined { description: track.packaging !== "cmaf" && track.initData ? base64ToHex(track.initData) : undefined, sampleRate: u53(track.samplerate ?? DEFAULT_SAMPLE_RATE), numberOfChannels: u53( - track.channelConfig ? Number.parseInt(track.channelConfig, 10) : DEFAULT_NUMBER_OF_CHANNELS, + (() => { + if (!track.channelConfig) return DEFAULT_NUMBER_OF_CHANNELS; + const parsed = Number.parseInt(track.channelConfig, 10); + return Number.isFinite(parsed) ? parsed : DEFAULT_NUMBER_OF_CHANNELS; + })(), ), bitrate: track.bitrate != null ? 
u53(track.bitrate) : undefined, }; diff --git a/rs/moq-lite/src/lite/publisher.rs b/rs/moq-lite/src/lite/publisher.rs index 0f5482349..f7e9e768d 100644 --- a/rs/moq-lite/src/lite/publisher.rs +++ b/rs/moq-lite/src/lite/publisher.rs @@ -259,7 +259,13 @@ impl Publisher { let broadcast = consumer.ok_or(Error::UnknownBroadcast)?; let track = broadcast.consume_track(&track)?; - let subscriber = track.subscribe(Subscription::default()).await?; + let subscriber = track + .subscribe(Subscription { + start: subscribe.start_group, + end: subscribe.end_group, + ..Default::default() + }) + .await?; let info = lite::SubscribeOk { priority: 0,