diff --git a/modules/theora/video_stream_theora.cpp b/modules/theora/video_stream_theora.cpp
index 28a8b77283bf..63a4d9558a47 100644
--- a/modules/theora/video_stream_theora.cpp
+++ b/modules/theora/video_stream_theora.cpp
@@ -30,14 +30,18 @@
 
 #include "video_stream_theora.h"
 
+#include "core/hash_map.h"
+#include "core/list.h"
 #include "core/os/os.h"
 #include "core/project_settings.h"
 
 #include "thirdparty/misc/yuv2rgb.h"
 
+#include <float.h>
+
 int VideoStreamPlaybackTheora::buffer_data() {
 
-	char *buffer = ogg_sync_buffer(&oy, 4096);
+	char *buffer = ogg_sync_buffer(&oy, BUFFERSIZE);
 
 #ifdef THEORA_USE_THREAD_STREAMING
@@ -45,7 +49,7 @@ int VideoStreamPlaybackTheora::buffer_data() {
 
 	do {
 		thread_sem->post();
-		read = MIN(ring_buffer.data_left(), 4096);
+		read = MIN(ring_buffer.data_left(), BUFFERSIZE);
 		if (read) {
 			ring_buffer.read((uint8_t *)buffer, read);
 			ogg_sync_wrote(&oy, read);
@@ -59,7 +63,7 @@ int VideoStreamPlaybackTheora::buffer_data() {
 
 #else
 
-	int bytes = file->get_buffer((uint8_t *)buffer, 4096);
+	int bytes = file->get_buffer((uint8_t *)buffer, BUFFERSIZE);
 	ogg_sync_wrote(&oy, bytes);
 	return (bytes);
 
@@ -552,11 +556,8 @@ void VideoStreamPlaybackTheora::update(float p_delta) {
 
 void VideoStreamPlaybackTheora::play() {
 
-	if (!playing)
-		time = 0;
-	else {
+	if (playing)
 		stop();
-	}
 
 	playing = true;
 	delay_compensation = ProjectSettings::get_singleton()->get("audio/video_delay_compensation_ms");
@@ -618,9 +619,234 @@ float VideoStreamPlaybackTheora::get_playback_position() const {
 	return get_time();
 };
 
-void VideoStreamPlaybackTheora::seek(float p_time){
+void VideoStreamPlaybackTheora::seek(float p_time) {
+	bool play_state = playing; // Save the playing state.
+	stop();
+
+	struct _page_info {
+		size_t block_number;
+		double time;
+		ogg_int64_t granulepos;
+	};
+	// Seeking approach based on https://xiph.org/oggz/doc/group__basics.html
+	float true_time = p_time - AudioServer::get_singleton()->get_output_latency() - delay_compensation;
+	p_time = MAX(0, true_time);
+
+	size_t buffer_size = (size_t)BUFFERSIZE;
+	size_t end_file = file->get_len();
+	size_t start_file = 0;
+	size_t number_of_blocks = (end_file - start_file) / buffer_size;
+
+	ogg_packet op;
+	size_t left = 0;
+	size_t right = number_of_blocks;
+
+	struct _page_info left_page = { 0, 0, 0 };
+	struct _page_info mid_page = { 0, 0, 0 };
+	struct _page_info right_page = { right, DBL_MAX, 0x7FFFFFFFFFFFFFFF };
+	HashMap<ogg_int64_t, _page_info> page_info_table;
+	HashMap<uint64_t, double> block_time;
+
+	// Binary search over fixed-size file blocks for the page closest to the seek time.
+	while (left <= right) {
+		// Seek to the midpoint block.
+		size_t mid_block = left + (right - left) / 2;
+		int block = mid_block;
+
+		if (block_time.has(block)) {
+			// This block was already visited, so the search has converged.
+			break;
+		}
+
+		// Clear the sync state and refill from the new file position.
+		ogg_sync_reset(&oy);
+		file->seek(block * buffer_size);
+		buffer_data();
+
+		bool next_midpoint = true;
+		while (true) {
+			// Keep syncing until a page is found; the buffer is only 4k while Ogg pages can be up to 65k in size.
+			int ogg_page_sync_state = ogg_sync_pageout(&oy, &og);
+			if (ogg_page_sync_state == -1) {
+				// A gap in the stream; read more data, or give up and pull the right boundary in at the end of the file.
+				if (buffer_data() == 0) {
+					right = mid_block;
+					break;
+				} else {
+					// The data just read belongs to the next block.
+					block++;
+				}
+			} else {
+				if (ogg_page_sync_state == 0) {
+					// More data is needed; stop at the end of the file.
+					if (buffer_data() == 0) {
+						right = mid_block;
+						break;
+					} else {
+						block++;
+					}
+				} else {
+					// Only pages that complete a packet carry a granulepos, and only video pages are of interest here.
+					if (ogg_page_packets(&og) > 0 && ogg_page_serialno(&og) == to.serialno) {
+						next_midpoint = false;
+						break;
+					}
+				}
+			}
+		}
+		if (next_midpoint) {
+			continue;
+		}
+
+		ogg_int64_t granulepos = ogg_page_granulepos(&og);
+		ogg_int64_t page_number = ogg_page_pageno(&og);
+		struct _page_info pg_info = { mid_block, th_granule_time(td, granulepos), granulepos };
+		page_info_table.set(page_number, pg_info);
+		block_time.set(mid_block, pg_info.time);
+		mid_page = pg_info;
+
+		// Binary search comparisons against the requested time.
+		if (ABS(p_time - pg_info.time) < .001) {
+			// The page time matches the seek time closely enough.
+			right_page = pg_info;
+			break;
+		}
+		if (pg_info.time > p_time) {
+			if (pg_info.granulepos < right_page.granulepos) {
+				right_page = pg_info;
+			}
+			right = mid_block;
+		} else {
+			if (pg_info.granulepos > left_page.granulepos) {
+				left_page = pg_info;
+			}
+			left = mid_block;
+		}
+	}
+
+	// Now find the closest keyframe at or before the target time.
+	int current_block = mid_page.block_number;
+	// Step back a block if the midpoint page is already past the target or is continued from the previous page.
+	if (mid_page.time > p_time || ogg_page_continued(&og)) {
+		current_block--;
+	}
+
+	// Backtrack block by block to find a keyframe before the target time.
+	// Keyframes appear to reside on their own page.
+	while (current_block >= 0) {
+		ogg_stream_reset(&to);
+		ogg_stream_reset(&vo);
+		ogg_sync_reset(&oy);
+		file->seek(current_block * buffer_size);
+		buffer_data();
+		bool seeked_file = false;
+		bool keyframe_found = false;
+		while (!seeked_file) {
+			int ogg_page_sync_state = ogg_sync_pageout(&oy, &og);
+			if (ogg_page_sync_state == -1) {
+				ogg_page_sync_state = ogg_sync_pageout(&oy, &og);
+			}
+			while (ogg_page_sync_state == 0) {
+				buffer_data();
+				seeked_file = true;
+				ogg_page_sync_state = ogg_sync_pageout(&oy, &og);
+			}
+			if (ogg_page_sync_state == 1) {
+				// Only queue pages that contain a single packet.
+				if (ogg_page_packets(&og) == 1) {
+					queue_page(&og);
+					// Keep feeding pages until a whole packet can be peeked.
+					while (ogg_stream_packetpeek(&to, &op) == 0) {
+						if (ogg_sync_pageout(&oy, &og) > 0) {
+							queue_page(&og);
+						} else {
+							buffer_data();
+						}
+					}
+					if (th_packet_iskeyframe(&op)) {
+						videobuf_time = th_granule_time(td, op.granulepos);
+						if (videobuf_time < p_time) {
+							keyframe_found = true;
+							break;
+						}
+					} else {
+						// Discard packets that are not keyframes.
+						ogg_stream_packetout(&to, &op);
+					}
+				}
+			}
+		}
+		if (keyframe_found) {
+			break;
+		}
+		current_block--;
+	}
+
+	ogg_int64_t audio_granulepos = 0;
+	ogg_int64_t total_packets = 0;
+	vorbis_synthesis_restart(&vd);
+	// Use the lowest post-processing level to speed up the catch-up decode.
+	pp_level = 0;
+	th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
+
+	// Process the keyframe.
+	ogg_int64_t video_granulepos;
+	th_decode_packetin(td, &op, &video_granulepos);
+	th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &video_granulepos, sizeof(video_granulepos));
+	th_ycbcr_buffer yuv;
+	th_decode_ycbcr_out(td, yuv); // Decode and discard the keyframe's image.
+	ogg_stream_packetout(&to, &op); // Consume the keyframe packet that was peeked above.
-	// no
+
+	// Decode video until the decoder catches up to the seek time.
+	while (videobuf_time <= p_time) {
+		int ogg_sync_state = ogg_sync_pageout(&oy, &og);
+		while (ogg_sync_state < 1) {
+			buffer_data();
+			ogg_sync_state = ogg_sync_pageout(&oy, &og);
+		}
+		if (ogg_page_serialno(&og) == to.serialno) {
+			// Video page: decode and discard frames to advance videobuf_time without extra allocations.
+			queue_page(&og);
+			while (ogg_stream_packetout(&to, &op) > 0) {
+				ogg_int64_t tmp_granulepos;
+				th_decode_packetin(td, &op, &tmp_granulepos);
+				if (op.granulepos > 0) {
+					// The packet carries an explicit granule position; resync the decoder to it.
+					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos, sizeof(op.granulepos));
+					videobuf_time = th_granule_time(td, tmp_granulepos);
+					video_granulepos = tmp_granulepos;
+				} else {
+					videobuf_time = th_granule_time(td, video_granulepos++);
+				}
+				th_ycbcr_buffer yuv;
+				th_decode_ycbcr_out(td, yuv); // Dump the frame.
+			}
+		} else {
+			// Audio page: drop pages that end before the seek time.
+			double end_music_time = vorbis_granule_time(&vd, ogg_page_granulepos(&og));
+			if (end_music_time > p_time) {
+				// Queue pages whose end time is past the seek time and remember where audio resumes.
+				queue_page(&og);
+				audio_granulepos = ogg_page_granulepos(&og);
+				total_packets = ogg_page_packets(&og);
+			}
+		}
+	}
+
+	// Update the number of audio frames written so far.
+	audio_frames_wrote = videobuf_time * vi.rate;
+	while (ogg_stream_packetout(&vo, &op) > 0) {
+		// Approximate each packet's start time by walking back from the end of its page.
+		ogg_int64_t offset = vorbis_packet_blocksize(&vi, &op) * total_packets--;
+		double current_audio_time = vorbis_granule_time(&vd, audio_granulepos - offset);
+		double diff = current_audio_time - videobuf_time;
+		if (diff > 0) {
+			// Audio resumes after the video position; feed silence to the mixer to cover the gap.
+			int blank_frames = diff * vi.rate;
+			const int AUXBUF_LEN = 4096;
+			float aux_buffer[AUXBUF_LEN];
+			int m = MIN(AUXBUF_LEN / vi.channels, blank_frames);
+			// Zero the interleaved samples for the frames that will be mixed.
+			for (int i = 0; i < m * vi.channels; i++) {
+				aux_buffer[i] = 0;
+			}
+			int mixed = mix_callback(mix_udata, aux_buffer, m);
+			audio_frames_wrote = mixed + videobuf_time * vi.rate;
+			break;
+		}
+	}
+	time = videobuf_time; // Set the playback time to the current frame.
+	// Resume playback if the stream was playing before the seek.
+	if (play_state) {
+		play();
+	}
 };
 
 void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback, void *p_userdata) {
diff --git a/modules/theora/video_stream_theora.h b/modules/theora/video_stream_theora.h
index 0c37d33358a6..fab7c33e0894 100644
--- a/modules/theora/video_stream_theora.h
+++ b/modules/theora/video_stream_theora.h
@@ -50,6 +50,7 @@ class VideoStreamPlaybackTheora : public VideoStreamPlayback {
 
 	enum {
 		MAX_FRAMES = 4,
+		BUFFERSIZE = 4096,
 	};
 
 	//Image frames[MAX_FRAMES];
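
For reviewers skimming the hunk above: the patch seeks in two phases, a bisection over fixed-size file blocks to land on a page near the requested time, followed by a backtrack to the preceding keyframe and a catch-up decode. The snippet below is not taken from the patch; it is a minimal, self-contained sketch of the bisection phase only, where the hypothetical probe_time callback stands in for "ogg_sync to a page inside this block and read its granule time".

// Standalone sketch of the block-bisection idea (not part of the patch).
#include <cstddef>
#include <cstdio>
#include <functional>

struct BlockHit {
	size_t block;
	double time;
};

// Returns the last block whose probed time is <= target_time, assuming probe_time
// is non-decreasing in the block number (true of granule times along an Ogg file).
static BlockHit bisect_blocks(size_t block_count, double target_time,
		const std::function<double(size_t)> &probe_time) {
	size_t left = 0;
	size_t right = block_count;
	BlockHit best = { 0, 0.0 };
	while (left < right) {
		size_t mid = left + (right - left) / 2;
		double t = probe_time(mid);
		if (t <= target_time) {
			best = { mid, t }; // Candidate block; keep searching to the right.
			left = mid + 1;
		} else {
			right = mid; // Page time is past the target; search to the left.
		}
	}
	return best;
}

int main() {
	// Pretend every 4096-byte block starts 0.1 s after the previous one.
	BlockHit hit = bisect_blocks(1000, 12.34, [](size_t b) { return b * 0.1; });
	printf("block %zu starts at %.2f s\n", hit.block, hit.time);
	return 0;
}

Unlike the patch, this sketch assumes probe_time always succeeds; the real code also has to skip blocks where no usable video page can be synced, which is what the next_midpoint/block++ handling in the hunk above deals with.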