Skip to content

Commit

Permalink
Merge pull request #468 from mercycorps/staging
Browse files Browse the repository at this point in the history
Staging to master for perm update release
  • Loading branch information
sanjuroj authored Nov 1, 2021
2 parents 4d5f9b4 + 40305ab commit b9c54d9
Show file tree
Hide file tree
Showing 14 changed files with 31,909 additions and 137 deletions.
12 changes: 2 additions & 10 deletions indicators/forms.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,11 +321,7 @@ def __init__(self, *args, **kwargs):
self.fields['data_collection_frequencies'].choices = [
(pk, _(freq)) for pk, freq in self.fields['data_collection_frequencies'].choices]

allowed_countries = [
*self.request.user.tola_user.access_data.get('countries', {}).keys(),
*[programaccess['country'] for programaccess in self.request.user.tola_user.access_data.get('programs', [])
if programaccess['program'] == self.programval.pk]
]
allowed_countries = [ac.id for ac in self.request.user.tola_user.available_countries]
countries = self.programval.country.filter(
pk__in=allowed_countries
)
Expand Down Expand Up @@ -635,11 +631,7 @@ def __init__(self, *args, **kwargs):
self.fields['data_collection_frequencies'].choices = [
(pk, _(freq)) for pk, freq in self.fields['data_collection_frequencies'].choices]

allowed_countries = [
*self.request.user.tola_user.access_data.get('countries', {}).keys(),
*[programaccess['country'] for programaccess in self.request.user.tola_user.access_data.get('programs', [])
if programaccess['program'] == self.programval.pk]
]
allowed_countries = [ac.id for ac in self.request.user.tola_user.available_countries]
countries = self.programval.country.filter(
pk__in=allowed_countries
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def get_create_form(self, **kwargs):
data = kwargs.get('data', None)
program = kwargs.get('program', self.program)
request = mock.MagicMock()
request.user.tola_user.access_data = {'countries': {self.country.pk: [], self.user_country.pk: []}}
request.user.tola_user.available_countries = [self.country, self.user_country]
return IndicatorForm(data, program=program, request=request, auto_id=False)


Expand Down Expand Up @@ -425,7 +425,7 @@ def get_update_form(self, **kwargs):
'target_frequency_num_periods': 1 if not instance else instance.target_frequency_num_periods
})
request = mock.MagicMock()
request.user.tola_user.access_data = {'countries': {self.country.pk: [], self.user_country.pk: []}}
request.user.tola_user.available_countries = [self.country, self.user_country]
form_kwargs = {
'program': program,
'initial': initial,
Expand Down
55 changes: 47 additions & 8 deletions scripts/get_github_project_issues.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,21 @@
from requests.auth import HTTPBasicAuth
import json
import os
from pathlib import Path
import yaml
import sys
import csv
import re
import getpass
import argparse

headers = {'Accept': 'application/vnd.github.inertia-preview+json'}

parser = argparse.ArgumentParser(description='Fetch issue data from a GitHub project board')
parser.add_argument('--column', help='the column name of the tickets you want to extract')
parser.add_argument('--columns', help='comma separated list of columns with tickets you want to extract')
parser.add_argument('--closeissues', action='store_true', help='Close all of the issues in the column')
parser.add_argument('--extraoutput', action='store_true', help='Get extra info like labels and description')
parser.add_argument('--labels', default='', help='Comma separated list of labels that should have their own column (already done for LOE and spike)')
args = parser.parse_args()

project_name = input('Enter the project name: ')
Expand Down Expand Up @@ -76,11 +80,12 @@
columns_url = columns_template % project_id
response = requests.get(columns_url, headers=headers, auth=auth)
cols_to_fetch = ['Done', 'Ready for Deploy']
if args.column:
cols_to_fetch = args.column.split(",")
if args.columns:
cols_to_fetch = [col.strip() for col in args.columns.split(',')]

column_ids = [col['id'] for col in json.loads(response.text) if col['name'] in cols_to_fetch]
issues = []
space_regex = re.compile(r'\s?\n\s?')
for col_id in column_ids:

# Loop through each card in each column and get the issue data associated with it
Expand All @@ -99,8 +104,26 @@
continue
issue_num = match.group(1)
issue_url = issue_template.format(issue_num)
issue_response = requests.get(issue_url, headers=headers, auth=auth)
issues.append((issue_num, json.loads(issue_response.text)['title']))
issue_response = json.loads(requests.get(issue_url, headers=headers, auth=auth).text)
description = space_regex.sub(' ', issue_response['body'][:200])
issue_data = {
'number': issue_num, 'title': issue_response['title'], 'description': description}
if args.extraoutput:
label_columns = {'LOE': [], 'spike': ''}
user_labels = {label.strip(): '' for label in args.labels.split(',')}
label_columns.update(user_labels)
label_columns.update({'other': []})
issue_data.update(label_columns)
for label in issue_response['labels']:
if 'LOE' in label['name']:
issue_data['LOE'].append(label['name'])
elif label['name'] in label_columns.keys():
issue_data[label['name']] = 'Yes'
else:
issue_data['other'].append(label['name'])
issue_data['LOE'] = ", ".join(issue_data['LOE'])
issue_data['other'] = ", ".join(issue_data['other'])
issues.append(issue_data)
if args.closeissues:
response = requests.patch(issue_url, headers=headers, auth=auth, json={'state': 'closed'})

Expand All @@ -110,9 +133,25 @@
has_next = False

if issues:
issues.sort(key=lambda k: int(k[0]), reverse=True)
issues.sort(key=lambda k: int(k['number']), reverse=True)
print('')
for i in issues:
print('#%s - %s' % i)
if args.extraoutput:
filepath = f'{str(Path.home())}/github_issue_dump.csv'
if os.path.isfile(filepath):
overwrite = input(f'\nWARNING: {filepath} already exists. \nType "YES" to overwrite: ')
execute = True if overwrite == 'YES' else False
else:
execute = True

if execute:
with open(filepath, 'w') as fh:
writer = csv.writer(fh)
writer.writerow(issues[0].keys()) # Header row
for i in issues:
writer.writerow(i.values())
print(f'CSV written to {filepath}')
else:
for i in issues:
print(f'#{i["number"]} - {i["title"]}')
else:
print("No cards in the column(s)", ', '.join(cols_to_fetch))
93 changes: 0 additions & 93 deletions tola/locale/README-TRANSLATION.md

This file was deleted.

6 changes: 5 additions & 1 deletion tola/management/commands/create_temp_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,16 @@
# the leading whitespace and html tag(s) from the start of the string so the
# "Translated" can be placed right before the string rather than outside of enclosing
# tags.

WHITESPACE_REGEX = re.compile('(^\s*(?:<.+?>)*)?(.+)', re.DOTALL)

class Command(BaseCommand):
help = """Create temporary translations """

diacritic_cycle = itertools.cycle(['Á', 'é', '', 'ö', 'Ź'])
# The diacritic_cycle object serves a dual purpose. One is to make sure that diacritics are included in
# case there is any difficulty with those special characters. It also helps test the ordering of lists if
# the list is not alphabetically sorted (like Frequency of Reporting in the indicator setup).
diacritic_cycle = itertools.cycle(['Á', 'é', 'I', 'ö', 'Ź'])

def handle(self, *args, **options):

Expand Down
Loading

0 comments on commit b9c54d9

Please sign in to comment.