From bab70fd9d28528f1e3dae207fa5589993fb1ec8e Mon Sep 17 00:00:00 2001
From: mparisi76
Date: Thu, 10 Jan 2019 13:06:44 -0500
Subject: [PATCH] fix(id3): cuechange event not being triggered on audio-only
 HLS streams (#334)

* cuechange event not being triggered on audio only HLS streams #130

* fix linting

* change tabs to spaces

* fix linting trailing space

* add test to ensure id3 cues are parsed from audio-only streams
---
 src/mse/virtual-source-buffer.js |  6 ++--
 test/mse/html.test.js            | 58 ++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/src/mse/virtual-source-buffer.js b/src/mse/virtual-source-buffer.js
index 48804bf21..dcfe6b7a7 100644
--- a/src/mse/virtual-source-buffer.js
+++ b/src/mse/virtual-source-buffer.js
@@ -479,8 +479,7 @@ export default class VirtualSourceBuffer extends videojs.EventTarget {
       sortedSegments.video.segments.unshift(sortedSegments.video.initSegment);
       sortedSegments.video.bytes += sortedSegments.video.initSegment.byteLength;
       this.concatAndAppendSegments_(sortedSegments.video, this.videoBuffer_);
-      // TODO: are video tracks the only ones with text tracks?
-      addTextTrackData(this, sortedSegments.captions, sortedSegments.metadata);
+
     } else if (this.videoBuffer_ && (this.audioDisabled_ || !this.audioBuffer_)) {
       // The transmuxer did not return any bytes of video, meaning it was all trimmed
       // for gop alignment. Since we have a video buffer and audio is disabled, updateend
@@ -491,6 +490,9 @@ export default class VirtualSourceBuffer extends videojs.EventTarget {
       triggerUpdateend = true;
     }
 
+    // Add text-track data (captions, ID3 metadata) for all appends, including audio-only
+    addTextTrackData(this, sortedSegments.captions, sortedSegments.metadata);
+
     if (!this.audioDisabled_ && this.audioBuffer_) {
       this.concatAndAppendSegments_(sortedSegments.audio, this.audioBuffer_);
     }
diff --git a/test/mse/html.test.js b/test/mse/html.test.js
index 5d5976f04..49ec7e066 100644
--- a/test/mse/html.test.js
+++ b/test/mse/html.test.js
@@ -1230,6 +1230,64 @@ QUnit.test('translates metadata events into WebVTT cues', function(assert) {
   assert.strictEqual(cues[2].endTime, mediaSource.duration, 'sourceended is fired');
 });
 
+QUnit.test('translates metadata events from audio-only stream into WebVTT cues', function(assert) {
+  let mediaSource = new videojs.MediaSource();
+  let sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.4d400d, mp4a.40.2"');
+
+  mediaSource.duration = Infinity;
+  mediaSource.nativeMediaSource_.duration = 60;
+
+  let types = [];
+  let metadata = [{
+    cueTime: 12,
+    frames: [{
+      data: 'This is a priv tag'
+    }]
+  }];
+
+  metadata.dispatchType = 0x10;
+  mediaSource.player_ = {
+    addRemoteTextTrack(options) {
+      types.push(options.kind);
+      return {
+        track: {
+          kind: options.kind,
+          label: options.label,
+          cues: [],
+          addCue(cue) {
+            this.cues.push(cue);
+          }
+        }
+      };
+    },
+    remoteTextTracks() {
+    }
+  };
+  sourceBuffer.timestampOffset = 10;
+
+  sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', new Uint8Array(1), {
+    metadata
+  }));
+  sourceBuffer.transmuxer_.onmessage(doneMessage);
+
+  assert.strictEqual(
+    sourceBuffer.metadataTrack_.inBandMetadataTrackDispatchType,
+    16,
+    'in-band metadata track dispatch type correctly set'
+  );
+  let cues = sourceBuffer.metadataTrack_.cues;
+
+  assert.strictEqual(types.length, 1, 'created one text track');
+  assert.strictEqual(types[0], 'metadata', 'the type was metadata');
+  assert.strictEqual(cues.length, 1, 'created one cue');
+  assert.strictEqual(cues[0].text, 'This is a priv tag', 'included the text');
+  assert.strictEqual(cues[0].startTime, 22, 'started at twenty two');
+  assert.strictEqual(cues[0].endTime, Number.MAX_VALUE, 'ended at the maximum value');
+  mediaSource.duration = 100;
+  mediaSource.trigger('sourceended');
+  assert.strictEqual(cues[0].endTime, mediaSource.duration, 'sourceended is fired');
+});
+
 QUnit.test('does not wrap mp4 source buffers', function(assert) {
   let mediaSource = new videojs.MediaSource();
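
Note: with addTextTrackData moved out of the video-only branch, ID3 metadata
from audio-only appends is turned into cues on a 'metadata' text track, which
is what lets the cuechange event fire. A minimal consumer-side sketch of the
behavior this enables, assuming a standard video.js player setup; the element
id and playlist URL below are placeholders:

    var player = videojs('example-player');

    // The source buffer exposes in-band ID3 data on a 'metadata' text track;
    // watch for it to be added.
    player.textTracks().addEventListener('addtrack', function(event) {
      var track = event.track;

      if (track.kind !== 'metadata') {
        return;
      }

      // With this fix, cuechange fires even when the stream carries no video.
      track.addEventListener('cuechange', function() {
        var cues = track.activeCues;

        for (var i = 0; i < cues.length; i++) {
          console.log('active ID3 cue:', cues[i].text);
        }
      });
    });

    // Placeholder URL for an audio-only HLS playlist.
    player.src({
      src: 'https://example.com/audio-only.m3u8',
      type: 'application/x-mpegURL'
    });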