
src/controller/timeline-controller.ts

import { Events } from '../events';
import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
import OutputFilter from '../utils/output-filter';
import { parseWebVTT } from '../utils/webvtt-parser';
import {
  sendAddTrackEvent,
  clearCurrentCues,
  addCueToTrack,
  removeCuesInRange,
} from '../utils/texttrack-utils';
import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
import { PlaylistLevelType } from '../types/loader';
import { Fragment } from '../loader/fragment';
import {
  FragParsingUserdataData,
  FragLoadedData,
  FragDecryptedData,
  MediaAttachingData,
  ManifestLoadedData,
  InitPTSFoundData,
  SubtitleTracksUpdatedData,
  BufferFlushingData,
  FragLoadingData,
} from '../types/events';
import { logger } from '../utils/logger';
import type Hls from '../hls';
import type { ComponentAPI } from '../types/component-api';
import type { HlsConfig } from '../config';
import type { CuesInterface } from '../utils/cues';
import type { MediaPlaylist } from '../types/media-playlist';
import type { VTTCCs } from '../types/vtt';

type TrackProperties = {
  label: string;
  languageCode: string;
  media?: MediaPlaylist;
};

type NonNativeCaptionsTrack = {
  _id?: string;
  label: string;
  kind: string;
  default: boolean;
  closedCaptions?: MediaPlaylist;
  subtitleTrack?: MediaPlaylist;
};
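/**
 * TimelineController coordinates hls.js text-track output: it routes
 * CEA-608/708 caption bytes into two Cea608Parser instances (channels
 * CC1-CC4), parses WebVTT and IMSC1 subtitle fragments, and either renders
 * the resulting cues natively on the media element's TextTracks or re-emits
 * them through NON_NATIVE_TEXT_TRACKS_FOUND and CUES_PARSED events,
 * depending on `renderTextTracksNatively`.
 */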
export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  private Cues: CuesInterface;
  private textTracks: Array<TextTrack> = [];
  private tracks: Array<MediaPlaylist> = [];
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  private readonly cea608Parser1!: Cea608Parser;
  private readonly cea608Parser2!: Cea608Parser;
  private lastSn: number = -1;
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  private captionsProperties: {
    textTrack1: TrackProperties;
    textTrack2: TrackProperties;
    textTrack3: TrackProperties;
    textTrack4: TrackProperties;
  };

  constructor(hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode,
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode,
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode,
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode,
      },
    };

    if (this.config.enableCEA708Captions) {
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    this._registerListeners();
  }

  private _registerListeners(): void {
    const { hls } = this;
    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }

  private _unregisterListeners(): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }
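  // The overlap test below drops cues that were already (mostly) emitted by a
  // previous parse. For example, against an existing range [0, 10] a cue
  // spanning [2, 8] overlaps for all 6 s of its duration (ratio 1 > 0.5) and
  // is skipped, while a cue spanning [9, 12] overlaps for only 1 of its 3 s
  // (ratio ~0.33) and is still rendered.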
  public addCues(
    trackName: string,
    startTime: number,
    endTime: number,
    screen: CaptionScreen,
    cueRanges: Array<[number, number]>
  ) {
    // skip cues which overlap more than 50% with previously parsed time ranges
    let merged = false;
    for (let i = cueRanges.length; i--; ) {
      const cueRange = cueRanges[i];
      const overlap = intersection(
        cueRange[0],
        cueRange[1],
        startTime,
        endTime
      );
      if (overlap >= 0) {
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if (overlap / (endTime - startTime) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      const track = this.captionsTracks[trackName];
      this.Cues.newCue(track, startTime, endTime, screen);
    } else {
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, {
        type: 'captions',
        cues,
        track: trackName,
      });
    }
  }

  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  private onInitPtsFound(
    event: Events.INIT_PTS_FOUND,
    { frag, id, initPTS, timescale }: InitPTSFoundData
  ) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, the initial PTS may arrive after the first VTT fragments have loaded.
    // Parse any unparsed fragments upon receiving the initial PTS.
    if (unparsedVttFrags.length) {
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach((frag) => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

  private getExistingTrack(trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  public createCaptionsTrack(trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  private createNativeTrack(trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of an existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  private createNonNativeTrack(trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a list with a single track for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media,
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }
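  // Note: a TextTrack created with HTMLMediaElement.addTextTrack() cannot be
  // removed from the element again, which is why the code above prefers to
  // find and reuse an existing Hls.js-managed track.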
  private createTextTrack(
    kind: TextTrackKind,
    label: string,
    lang?: string
  ): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

  public destroy() {
    this._unregisterListeners();
  }

  private onMediaAttaching(
    event: Events.MEDIA_ATTACHING,
    data: MediaAttachingData
  ) {
    this.media = data.media;
    this._cleanTracks();
  }

  private onMediaDetaching() {
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach((trackName) => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  private onManifestLoading() {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  private _cleanTracks() {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  private onSubtitleTracksUpdated(
    event: Events.SUBTITLE_TRACKS_UPDATED,
    data: SubtitleTracksUpdatedData
  ) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      const sameTracks =
        this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (textTrack) {
            clearCurrentCues(textTrack);
          } else {
            textTrack = this.createTextTrack(
              'subtitles',
              track.name,
              track.lang
            );
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track,
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, {
          tracks: tracksList,
        });
      }
    }
  }
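  // Maps the manifest's CLOSED-CAPTIONS entries onto the four caption slots:
  // an INSTREAM-ID of "CC1".."CC4" (608) or "SERVICE1".."SERVICE4" (708)
  // selects textTrack1..textTrack4, e.g. INSTREAM-ID="CC3" sets the label and
  // language of textTrack3.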
  private onManifestLoaded(
    event: Events.MANIFEST_LOADED,
    data: ManifestLoadedData
  ) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach((captionsTrack) => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(
          captionsTrack.instreamId as string
        );
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties =
          this.captionsProperties[trackName];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) {
          // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }
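  // A non-contiguous main fragment (e.g. the sequence number jumps from 10 to
  // 42 after a seek) invalidates buffered 608 parser state, so both parsers
  // are reset before samples from the new fragment arrive.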
  private onFragLoading(event: Events.FRAG_LOADING, data: FragLoadingData) {
    const { cea608Parser1, cea608Parser2, lastSn } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (data.frag.type === PlaylistLevelType.MAIN) {
      const sn = data.frag.sn;
      if (sn !== lastSn + 1) {
        cea608Parser1.reset();
        cea608Parser2.reset();
      }
      this.lastSn = sn as number;
    }
  }

  private onFragLoaded(event: Events.FRAG_LOADED, data: FragLoadedData) {
    const { frag, payload } = data;
    const { initPTS, unparsedVttFrags } = this;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      // If the fragment is a subtitle fragment, parse it as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS; store fragments until one arrives.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
              success: false,
              frag,
              error: new Error('Missing initial subtitle PTS'),
            });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if (
          decryptData == null ||
          decryptData.key == null ||
          decryptData.method !== 'AES-128'
        ) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          if (!vttCCs[frag.cc]) {
            vttCCs[frag.cc] = {
              start: frag.start,
              prevCC: this.prevCC,
              new: true,
            };
            this.prevCC = frag.cc;
          }
          if (
            trackPlaylistMedia &&
            trackPlaylistMedia.textCodec === IMSC1_CODEC
          ) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error: new Error('Empty subtitle payload'),
        });
      }
    }
  }
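  // Both parse paths below align cue times with the media timeline using the
  // initPTS/timescale pair recorded for this fragment's discontinuity
  // sequence (indexed by frag.cc in onInitPtsFound).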
  private _parseIMSC1(frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag,
        });
      },
      (error) => {
        logger.log(`Failed to parse IMSC1: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error,
        });
      }
    );
  }

  private _parseVTTs(frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    parseWebVTT(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      vttCCs,
      frag.cc,
      frag.start,
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag,
        });
      },
      (error) => {
        this._fallbackToIMSC1(frag, payload);
        // Something went wrong while parsing. Trigger the event with success: false.
        logger.log(`Failed to parse VTT cue: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error,
        });
      }
    );
  }

  private _fallbackToIMSC1(frag: Fragment, payload: ArrayBuffer) {
    // If the textCodec is unknown, try parsing as IMSC1 and set textCodec based on the result.
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(
        payload,
        this.initPTS[frag.cc],
        this.timescale[frag.cc],
        () => {
          trackPlaylistMedia.textCodec = IMSC1_CODEC;
          this._parseIMSC1(frag, payload);
        },
        () => {
          trackPlaylistMedia.textCodec = 'wvtt';
        }
      );
    }
  }

  private _appendCues(cues: VTTCue[], fragLevel: number) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is an async method, and if the currently selected text track's mode
      // is set to "disabled" before parsing finishes, currentTrack.cues will be null and calling
      // cues.getCueById would throw. Because we return early when the mode is disabled,
      // `cues` cannot be null below.
      if (textTrack.mode === 'disabled') {
        return;
      }
      cues.forEach((cue) => addCueToTrack(textTrack, cue));
    } else {
      const currentTrack = this.tracks[fragLevel];
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }

  private onFragDecrypted(
    event: Events.FRAG_DECRYPTED,
    data: FragDecryptedData
  ) {
    const { frag } = data;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push(data as unknown as FragLoadedData);
        return;
      }
      this.onFragLoaded(Events.FRAG_LOADED, data as unknown as FragLoadedData);
    }
  }

  private onSubtitleTracksCleared() {
    this.tracks = [];
    this.captionsTracks = {};
  }

  private onFragParsingUserdata(
    event: Events.FRAG_PARSING_USERDATA,
    data: FragParsingUserdataData
  ) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately.
    // It will create the proper timestamps based on the PTS value.
    for (let i = 0; i < data.samples.length; i++) {
      const ccBytes = data.samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(data.samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(data.samples[i].pts, ccdatas[1]);
      }
    }
  }
  private onBufferFlushing(
    event: Events.BUFFER_FLUSHING,
    { startOffset, endOffset, type }: BufferFlushingData
  ) {
    // Clear 608 caption cues from the back buffer only.
    // Cues ahead of the playhead are never removed, because we could lose streamed 608 content from recent fragments.
    if (!type || type === 'video') {
      const { media } = this;
      if (!media || media.currentTime < endOffset) {
        return;
      }
      const { captionsTracks } = this;
      Object.keys(captionsTracks).forEach((trackName) =>
        removeCuesInRange(captionsTracks[trackName], startOffset, endOffset)
      );
    }
  }
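  // Unpacks ATSC A/53 cc_data: byte 0 carries cc_count in its low five bits,
  // byte 1 is reserved, and cc_count three-byte triplets follow. Within each
  // triplet, bit 2 of the first byte is cc_valid and bits 0-1 are cc_type
  // (0 = 608 field 1, 1 = 608 field 2); the remaining two bytes are the 7-bit
  // data pair. 708 packet data (cc_type 2 and 3) is ignored here.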
  private extractCea608Data(byteArray: Uint8Array): number[][] {
    const count = byteArray[0] & 31;
    let position = 2;
    const actualCCBytes: number[][] = [[], []];

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7f & byteArray[position++];
      const ccbyte2 = 0x7f & byteArray[position++];
      const ccValid = (4 & tmpByte) !== 0;
      const ccType = 3 & tmpByte;

      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }

      if (ccValid) {
        if (ccType === 0 || ccType === 1) {
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
}

function canReuseVttTextTrack(inUseTrack, manifestTrack): boolean {
  return (
    inUseTrack &&
    inUseTrack.label === manifestTrack.name &&
    !(inUseTrack.textTrack1 || inUseTrack.textTrack2)
  );
}
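// Length of the overlap between [x1, x2] and [y1, y2]; negative when the
// ranges are disjoint. For example, intersection(0, 5, 3, 8) === 2 and
// intersection(0, 2, 4, 6) === -2.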
function intersection(x1: number, x2: number, y1: number, y2: number): number {
  return Math.min(x2, y2) - Math.max(x1, y1);
}
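// Seeds the per-discontinuity VTT state with an entry for CC 0. The WebVTT
// parser consumes these records (together with ccOffset/presentationOffset)
// to shift cue times across discontinuities.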
function newVTTCCs(): VTTCCs {
  return {
    ccOffset: 0,
    presentationOffset: 0,
    0: {
      start: 0,
      prevCC: -1,
      new: false,
    },
  };
}
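
For context, a minimal sketch of how an embedding application might consume
this controller's output through the public hls.js API. The handler bodies are
illustrative assumptions; the config options and event names are the ones used
in the source above.

import Hls from 'hls.js';

// With non-native rendering, TimelineController emits tracks and cues as
// events instead of writing to the <video> element's TextTracks.
const hls = new Hls({
  renderTextTracksNatively: false,
  enableCEA708Captions: true,
  enableWebVTT: true,
});

hls.on(Hls.Events.NON_NATIVE_TEXT_TRACKS_FOUND, (event, data) => {
  // data.tracks is the list built in createNonNativeTrack /
  // onSubtitleTracksUpdated above.
  data.tracks.forEach((track) => {
    console.log('track:', track.label, track.kind);
  });
});

hls.on(Hls.Events.CUES_PARSED, (event, data) => {
  // data is { type: 'captions' | 'subtitles', cues, track }, as triggered
  // from addCues and _appendCues above.
  console.log(`${data.cues.length} ${data.type} cue(s) for ${data.track}`);
});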