diff --git a/apertium_apy/apy.py b/apertium_apy/apy.py
index 3479aa87a..4e2d0f51c 100755
--- a/apertium_apy/apy.py
+++ b/apertium_apy/apy.py
@@ -44,6 +44,7 @@
     GenerateHandler,
     IdentifyLangHandler,
     ListHandler,
+    ListPairHandler,
     ListLanguageNamesHandler,
     PerWordHandler,
     PipeDebugHandler,
@@ -265,7 +266,7 @@ def setup_application(args):
     handlers = [
         (r'/', RootHandler),
         (r'/list', ListHandler),
-        (r'/listPairs', ListHandler),
+        (r'/listPairs', ListPairHandler),
         (r'/stats', StatsHandler),
         (r'/pairprefs', PairPrefsHandler),
         (r'/translate', TranslateHandler),
diff --git a/apertium_apy/handlers/__init__.py b/apertium_apy/handlers/__init__.py
index 40705fdb1..e9ce495ed 100644
--- a/apertium_apy/handlers/__init__.py
+++ b/apertium_apy/handlers/__init__.py
@@ -5,6 +5,7 @@
 from apertium_apy.handlers.identify_lang import IdentifyLangHandler  # noqa: F401
 from apertium_apy.handlers.list_language_names import ListLanguageNamesHandler  # noqa: F401
 from apertium_apy.handlers.list_modes import ListHandler  # noqa: F401
+from apertium_apy.handlers.list_modes import ListPairHandler  # noqa: F401
 from apertium_apy.handlers.per_word import PerWordHandler  # noqa: F401
 from apertium_apy.handlers.pipe_debug import PipeDebugHandler  # noqa: F401
 from apertium_apy.handlers.speller import SpellerHandler  # noqa: F401
diff --git a/apertium_apy/handlers/list_modes.py b/apertium_apy/handlers/list_modes.py
index decd6932e..a8d496ddb 100644
--- a/apertium_apy/handlers/list_modes.py
+++ b/apertium_apy/handlers/list_modes.py
@@ -7,9 +7,31 @@
 class ListHandler(BaseHandler):
     @tornado.gen.coroutine
     def get(self):
-        query = self.get_argument('q', default='pairs')
+        query = self.get_argument('q', default='all')
 
-        if query == 'pairs':
+        if query == 'all':
+            src = self.get_argument('src', default=None)
+            pairs_data = []
+            if src:
+                pairs = [(src, trg) for trg in self.paths[src]]
+            else:
+                pairs = [(p[0], p[1]) for par in self.pairs for p in [par.split('-')]]
+            for (l1, l2) in pairs:
+                pairs_data.append({'sourceLanguage': l1, 'targetLanguage': l2})
+                if self.get_arguments('include_deprecated_codes'):
+                    pairs_data.append({'sourceLanguage': to_alpha2_code(l1), 'targetLanguage': to_alpha2_code(l2)})
+            response = {
+                'responseData': {
+                    'pairsData': pairs_data,
+                    'generatorsData': {pair: modename for (pair, (path, modename)) in self.generators.items()},
+                    'taggersData': {pair: modename for (pair, (path, modename)) in self.taggers.items()},
+                    'spellersData': {lang_src: modename for (lang_src, (path, modename)) in self.spellers.items()}
+                },
+                'responseDetails': None,
+                'responseStatus': 200
+            }
+            self.send_response(response)
+        elif query == 'pairs':
             src = self.get_argument('src', default=None)
             response_data = []
             if src:
@@ -30,4 +52,19 @@ def get(self):
         elif query == 'spellers':
             self.send_response({lang_src: modename for (lang_src, (path, modename)) in self.spellers.items()})
         else:
-            self.send_error(400, explanation='Expecting q argument to be one of analysers, generators, spellers, disambiguators, or pairs')
+            self.send_error(400, explanation='Expecting q argument to be one of analysers, generators, spellers, disambiguators, pairs or all')
+
+class ListPairHandler(BaseHandler):
+    @tornado.gen.coroutine
+    def get(self):
+        src = self.get_argument('src', default=None)
+        response_data = []
+        if src:
+            pairs = [(src, trg) for trg in self.paths[src]]
+        else:
+            pairs = [(p[0], p[1]) for par in self.pairs for p in [par.split('-')]]
+        for (l1, l2) in pairs:
+            response_data.append({'sourceLanguage': l1, 'targetLanguage': l2})
+            if self.get_arguments('include_deprecated_codes'):
+                response_data.append({'sourceLanguage': to_alpha2_code(l1), 'targetLanguage': to_alpha2_code(l2)})
+        self.send_response({'responseData': response_data, 'responseDetails': None, 'responseStatus': 200})
diff --git a/tests/test.py b/tests/test.py
index 575e6dd1d..869fecb41 100755
--- a/tests/test.py
+++ b/tests/test.py
@@ -143,6 +143,33 @@ def test_home_page(self):
 
 
 class TestListHandler(BaseTestCase):
+    def test_list_all(self):
+        response = self.fetch_json('/list', {'q': 'all'})
+        self.assertIsNone(response['responseDetails'])
+        self.assertEqual(response['responseStatus'], 200)
+
+        expected_pairs = set(map(lambda x: frozenset(x.items()), [
+            {'sourceLanguage': 'sme', 'targetLanguage': 'nob'},
+            {'sourceLanguage': 'eng', 'targetLanguage': 'spa'},
+            {'sourceLanguage': 'spa', 'targetLanguage': 'eng_US'},
+            {'sourceLanguage': 'spa', 'targetLanguage': 'eng'},
+        ]))
+        expected_generators = {'nno': 'nno-gener'}
+        expected_taggers = {'nno': 'nno-tagger'}
+        expected_spellers = {'nno': 'nno-speller'}
+
+        response_pairs = set(map(lambda x: frozenset(x.items()), response['responseData']['pairsData']))
+        self.assertTrue(response_pairs >= expected_pairs, '{} is missing one of {}'.format(response_pairs, expected_pairs))
+
+        self.assertTrue(response['responseData']['generatorsData'].items() >= expected_generators.items(),
+                        '{} is missing {}'.format(response['responseData']['generatorsData'], expected_generators))
+
+        self.assertTrue(response['responseData']['taggersData'].items() >= expected_taggers.items(),
+                        '{} is missing {}'.format(response['responseData']['taggersData'], expected_taggers))
+
+        self.assertTrue(response['responseData']['spellersData'].items() >= expected_spellers.items(),
+                        '{} is missing {}'.format(response['responseData']['spellersData'], expected_spellers))
+
     def test_list_pairs(self):
         response = self.fetch_json('/list', {'q': 'pairs'})
         self.assertIsNone(response['responseDetails'])
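For reviewers, a minimal sketch of how the changed endpoints could be exercised by hand against a locally running APy instance. The base address (port 2737) and the use of urllib are assumptions; the query parameters and response keys follow the handlers in the diff above.

```python
# Manual check of the endpoints touched by this diff, assuming an APy
# instance is already running locally (port 2737 is an assumed default).
import json
from urllib.request import urlopen

BASE = 'http://localhost:2737'

# /list?q=all aggregates pairs, generators, taggers and spellers in one response.
with urlopen(BASE + '/list?q=all') as resp:
    data = json.load(resp)
print(data['responseData']['pairsData'])       # e.g. [{'sourceLanguage': 'eng', 'targetLanguage': 'spa'}, ...]
print(data['responseData']['generatorsData'])  # e.g. {'nno': 'nno-gener'}

# /listPairs is now served by ListPairHandler and keeps its original JSON shape.
with urlopen(BASE + '/listPairs') as resp:
    pairs = json.load(resp)
print(pairs['responseData'])  # [{'sourceLanguage': ..., 'targetLanguage': ...}, ...]
```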