61 changes: 53 additions & 8 deletions cms/djangoapps/contentstore/views/tests/test_transcripts.py
@@ -195,14 +195,17 @@ def clean_temporary_transcripts(self):
self.bad_name_srt_file.close()
self.bom_srt_file.close()

def upload_transcript(self, locator, transcript_file):
def upload_transcript(self, locator, transcript_file, edx_video_id=None):
"""
Uploads a transcript for a video
"""
payload = {}
if locator:
payload.update({'locator': locator})

if edx_video_id is not None:
payload.update({'edx_video_id': edx_video_id})

if transcript_file:
payload.update({'transcript-file': transcript_file})

@@ -233,7 +236,7 @@ def test_transcript_upload_success(self, edx_video_id, include_bom):

# Upload a transcript
transcript_file = self.bom_srt_file if include_bom else self.good_srt_file
response = self.upload_transcript(self.video_usage_key, transcript_file)
response = self.upload_transcript(self.video_usage_key, transcript_file, '')

# Verify the response
self.assert_response(response, expected_status_code=200, expected_message='Success')
@@ -258,7 +261,7 @@ def test_transcript_upload_without_locator(self):
"""
Test that transcript upload validation fails if the video locator is missing
"""
response = self.upload_transcript(locator=None, transcript_file=self.good_srt_file)
response = self.upload_transcript(locator=None, transcript_file=self.good_srt_file, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
@@ -269,7 +272,7 @@ def test_transcript_upload_without_file(self):
"""
Test that transcript upload validation fails if transcript file is missing
"""
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=None)
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=None, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
@@ -280,7 +283,11 @@ def test_transcript_upload_bad_format(self):
"""
Test that transcript upload validation fails if transcript format is not SRT
"""
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=self.bad_name_srt_file)
response = self.upload_transcript(
locator=self.video_usage_key,
transcript_file=self.bad_name_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
@@ -292,7 +299,11 @@ def test_transcript_upload_bad_content(self):
Test that transcript upload validation fails in case of bad transcript content.
"""
# Request to upload transcript for the video
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=self.bad_data_srt_file)
response = self.upload_transcript(
locator=self.video_usage_key,
transcript_file=self.bad_data_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
@@ -306,7 +317,7 @@ def test_transcript_upload_unknown_category(self):
# non_video module setup - i.e. an item whose category is not 'video'.
usage_key = self.create_non_video_module()
# Request to upload transcript for the item
response = self.upload_transcript(locator=usage_key, transcript_file=self.good_srt_file)
response = self.upload_transcript(locator=usage_key, transcript_file=self.good_srt_file, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
@@ -318,13 +329,47 @@ def test_transcript_upload_non_existent_item(self):
Test that transcript upload validation fails in case of an invalid item locator.
"""
# Request to upload transcript for the item
response = self.upload_transcript(locator='non_existent_locator', transcript_file=self.good_srt_file)
response = self.upload_transcript(
locator='non_existent_locator',
transcript_file=self.good_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Cannot find item by locator.'
)

def test_transcript_upload_without_edx_video_id(self):
"""
Test that transcript upload validation fails if the `edx_video_id` is missing
"""
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=self.good_srt_file)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Video ID is required.'
)

def test_transcript_upload_with_non_existent_edx_video_id(self):
"""
Test that transcript upload fails if the `edx_video_id` received in the
POST request does not correspond to an existing video.
"""
non_existent_edx_video_id = '1111-2222-3333-4444'

# Upload with a non-existent `edx_video_id`
response = self.upload_transcript(
locator=self.video_usage_key,
transcript_file=self.good_srt_file,
edx_video_id=non_existent_edx_video_id
)
# Verify the response
self.assert_response(response, expected_status_code=400, expected_message='Invalid Video ID')

# Verify that no transcript exists for the non-existent `edx_video_id`
self.assertIsNone(get_video_transcript_content(non_existent_edx_video_id, language_code=u'en'))


@ddt.ddt
class TestChooseTranscripts(BaseTranscripts):
19 changes: 15 additions & 4 deletions cms/djangoapps/contentstore/views/transcripts_ajax.py
@@ -169,19 +169,22 @@ def validate_transcript_upload_data(request):
error, validated_data = None, {}
data, files = request.POST, request.FILES
video_locator = data.get('locator')
edx_video_id = data.get('edx_video_id')
if not video_locator:
error = _(u'Video locator is required.')
elif 'transcript-file' not in files:
error = _(u'A transcript file is required.')
elif os.path.splitext(files['transcript-file'].name)[1][1:] != Transcript.SRT:
error = _(u'This transcript file type is not supported.')
elif 'edx_video_id' not in data:
error = _(u'Video ID is required.')

if not error:
error, video = validate_video_module(request, video_locator)
if not error:
validated_data.update({
'video': video,
'edx_video_id': clean_video_id(video.edx_video_id),
'edx_video_id': clean_video_id(edx_video_id) or clean_video_id(video.edx_video_id),
'transcript_file': files['transcript-file']
})

@@ -212,6 +215,8 @@ def upload_transcripts(request):
video.edx_video_id = edx_video_id
video.save_with_metadata(request.user)

response = JsonResponse({'edx_video_id': edx_video_id, 'status': 'Success'}, status=200)

try:
# Convert 'srt' transcript into the 'sjson' and upload it to
# configured transcript storage. For example, S3.
@@ -220,7 +225,7 @@
input_format=Transcript.SRT,
output_format=Transcript.SJSON
)
create_or_update_video_transcript(
transcript_created = create_or_update_video_transcript(
video_id=edx_video_id,
language_code=u'en',
metadata={
@@ -230,7 +235,9 @@
},
file_data=ContentFile(sjson_subs),
)
response = JsonResponse({'edx_video_id': edx_video_id, 'status': 'Success'}, status=200)

if transcript_created is None:
response = JsonResponse({'status': 'Invalid Video ID'}, status=400)

except (TranscriptsGenerationException, UnicodeDecodeError):

@@ -310,7 +317,8 @@ def check_transcripts(request):
transcripts_presence['status'] = 'Success'

try:
get_transcript_from_val(edx_video_id=item.edx_video_id, lang=u'en')
edx_video_id = clean_video_id(videos.get('edx_video_id'))
get_transcript_from_val(edx_video_id=edx_video_id, lang=u'en')
command = 'found'
except NotFoundError:
filename = 'subs_{0}.srt.sjson'.format(item.sub)
@@ -465,6 +473,9 @@ def _validate_transcripts_data(request):
for video_data in data.get('videos'):
if video_data['type'] == 'youtube':
videos['youtube'] = video_data['video']
elif video_data['type'] == 'edx_video_id':
if clean_video_id(video_data['video']):
videos['edx_video_id'] = video_data['video']
else: # do not add same html5 videos
if videos['html5'].get('video') != video_data['video']:
videos['html5'][video_data['video']] = video_data['mode']
2 changes: 1 addition & 1 deletion cms/static/cms/js/spec/main.js
@@ -242,10 +242,10 @@
'js/spec/views/metadata_edit_spec',
'js/spec/views/textbook_spec',
'js/spec/views/upload_spec',
'js/spec/video/transcripts/message_manager_spec',
'js/spec/video/transcripts/utils_spec',
'js/spec/video/transcripts/editor_spec',
'js/spec/video/transcripts/videolist_spec',
'js/spec/video/transcripts/message_manager_spec',
'js/spec/video/transcripts/file_uploader_spec',
'js/spec/models/component_template_spec',
'js/spec/models/explicit_url_spec',
5 changes: 5 additions & 0 deletions cms/static/cms/js/spec/main_squire.js
@@ -28,6 +28,7 @@
'jquery.iframe-transport': 'xmodule_js/common_static/js/vendor/jQuery-File-Upload/js/jquery.iframe-transport', // eslint-disable-line max-len
'jquery.inputnumber': 'xmodule_js/common_static/js/vendor/html5-input-polyfills/number-polyfill',
'jquery.immediateDescendents': 'xmodule_js/common_static/js/src/jquery.immediateDescendents',
'jquery.ajaxQueue': 'xmodule_js/common_static/js/vendor/jquery.ajaxQueue',
'datepair': 'xmodule_js/common_static/js/vendor/timepicker/datepair',
'date': 'xmodule_js/common_static/js/vendor/date',
'text': 'xmodule_js/common_static/js/vendor/requirejs/text',
@@ -108,6 +109,10 @@
deps: ['jquery', 'tinymce'],
exports: 'jQuery.fn.tinymce'
},
'jquery.ajaxQueue': {
deps: ['jquery'],
exports: 'jQuery.fn.ajaxQueue'
},
'datepair': {
deps: ['jquery.ui', 'jquery.timepicker']
},
105 changes: 103 additions & 2 deletions cms/static/js/spec/video/transcripts/editor_spec.js
@@ -38,7 +38,7 @@ function($, Backbone, _, Utils, Editor, MetadataView, MetadataModel, MetadataCol
field_name: 'edx_video_id',
help: 'Specifies the video ID.',
options: [],
type: MetadataModel.GENERIC_TYPE,
type: 'VideoID',
value: 'basic tab video id'
},
models = [DisplayNameEntry, VideoListEntry, VideoIDEntry],
@@ -51,7 +51,8 @@ function($, Backbone, _, Utils, Editor, MetadataView, MetadataModel, MetadataCol
object: testData,
string: JSON.stringify(testData)
},
transcripts, $container;
component_locator = 'component_locator',
transcripts, $container, waitForEvent, editor;

var waitsForDisplayName = function(collection) {
return jasmine.waitUntil(function() {
@@ -76,15 +77,109 @@ function($, Backbone, _, Utils, Editor, MetadataView, MetadataModel, MetadataCol
Utils.Storage.remove('sub');
});

describe('Events', function() {
beforeEach(function() {
Utils.command.and.callThrough();
spyOn(Backbone, 'trigger').and.callThrough();
spyOn(Editor.prototype, 'destroy').and.callThrough();
spyOn(Editor.prototype, 'handleFieldChanged').and.callThrough();
spyOn(Editor.prototype, 'getLocator').and.returnValue(component_locator);

appendSetFixtures(
sandbox({ // eslint-disable-line no-undef
class: 'wrapper-comp-settings basic_metadata_edit',
'data-metadata': JSON.stringify({video_url: VideoListEntry, edx_video_id: VideoIDEntry})
})
);

appendSetFixtures(
$('<script>',
{
id: 'metadata-videolist-entry',
type: 'text/template'
}
).text(readFixtures('video/transcripts/metadata-videolist-entry.underscore'))
);

appendSetFixtures(
$('<script>',
{
id: 'metadata-string-entry',
type: 'text/template'
}
).text(readFixtures('metadata-string-entry.underscore'))
);

editor = new Editor({
el: $('.basic_metadata_edit')
});

// reset the already triggered events
Backbone.trigger.calls.reset();
// reset the manual call to `handleFieldChanged` we made in the `editor.js::initialize`
Editor.prototype.handleFieldChanged.calls.reset();
});

waitForEvent = function(eventName) {
var triggerCallArgs;
return jasmine.waitUntil(function() {
triggerCallArgs = Backbone.trigger.calls.mostRecent().args;
return Backbone.trigger.calls.count() === 1 && triggerCallArgs[0] === eventName;
});
};

afterEach(function() {
Backbone.trigger.calls.reset();
Editor.prototype.destroy.calls.reset();
Editor.prototype.handleFieldChanged.calls.reset();
});

it('handles transcripts:basicTabFieldChanged', function(done) {
var event = 'transcripts:basicTabFieldChanged';

Backbone.trigger(event);
waitForEvent(event)
.then(function() {
expect(Editor.prototype.handleFieldChanged).toHaveBeenCalled();
expect(Utils.command).toHaveBeenCalledWith(
'check',
component_locator,
[
{ mode: 'youtube', video: '12345678901', type: 'youtube' },
{ mode: 'html5', video: 'video', type: 'mp4' },
{ mode: 'html5', video: 'video', type: 'webm' },
{ mode: 'edx_video_id', type: 'edx_video_id', video: 'basic tab video id' }
]
);
}).always(done);
});

it('handles xblock:editorModalHidden', function(done) {
var event = 'xblock:editorModalHidden';

Backbone.trigger(event);
waitForEvent(event)
.then(function() {
expect(Editor.prototype.destroy).toHaveBeenCalled();
}).always(done);
});
});

describe('Test initialization', function() {
beforeEach(function() {
spyOn(MetadataView, 'Editor');
spyOn(Editor.prototype, 'handleFieldChanged');

transcripts = new Editor({
el: $container
});
});

afterEach(function() {
MetadataView.Editor.calls.reset();
Editor.prototype.handleFieldChanged.calls.reset();
});

$.each(metadataDict, function(index, val) {
it('toModels with argument as ' + index, function() {
expect(transcripts.toModels(val)).toEqual(models);
@@ -159,6 +254,7 @@ function($, Backbone, _, Utils, Editor, MetadataView, MetadataModel, MetadataCol

beforeEach(function() {
spyOn(MetadataView, 'Editor');
spyOn(Editor.prototype, 'handleFieldChanged');

transcripts = new Editor({
el: $container
@@ -182,6 +278,11 @@ function($, Backbone, _, Utils, Editor, MetadataView, MetadataModel, MetadataCol
);
});

afterEach(function() {
MetadataView.Editor.calls.reset();
Editor.prototype.handleFieldChanged.calls.reset();
});

describe('Test Advanced to Basic synchronization', function() {
it('Correct data', function(done) {
transcripts.syncBasicTab(metadataCollection, metadataView);