Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

align 4.1.x to master #152

Merged
merged 2 commits into from
Jan 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions importer/handlers/common/raster.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,7 @@ def overwrite_geonode_resource(
dataset.refresh_from_db()

self.handle_xml_file(dataset, _exec)
self.handle_sld_file(dataset, _exec)

resource_manager.set_thumbnail(self.object.uuid, instance=self.object, overwrite=False)
dataset.refresh_from_db()
Expand Down
1 change: 1 addition & 0 deletions importer/handlers/common/vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -520,6 +520,7 @@ def overwrite_geonode_resource(
dataset.refresh_from_db()

self.handle_xml_file(dataset, _exec)
self.handle_sld_file(dataset, _exec)

resource_manager.set_thumbnail(dataset.uuid, instance=dataset, overwrite=False)
dataset.refresh_from_db()
Expand Down
37 changes: 19 additions & 18 deletions importer/orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,21 +215,19 @@ def evaluate_execution_progress(self, execution_id, _log=None, handler_module_pa
| Q(task_kwargs__icontains=execution_id)
| Q(result__icontains=execution_id)
)
_has_data = ResourceHandlerInfo.objects.filter(execution_request__exec_id=execution_id).exists()

# .all() is needed since we want to have the last status on the DB without take in consideration the cache
if (
exec_result.all()
.exclude(Q(status=states.SUCCESS) | Q(status=states.FAILURE))
.exists()
):
logger.info(
f"Execution progress with id {execution_id} is not finished yet, continuing"
)
return
self._evaluate_last_dataset(is_last_dataset, _log, execution_id, handler_module_path)
elif exec_result.all().filter(status=states.FAILURE).exists():
"""
Set the execution as failed if all the tasks are done and at least one of them has failed.
"""
_has_data = ResourceHandlerInfo.objects.filter(execution_request__exec_id=execution_id).exists()
# failed = [x.task_id for x in exec_result.filter(status=states.FAILURE)]
# _log_message = f"For the execution ID {execution_id} the following celery tasks have failed: {failed}"
if _has_data:
Expand All @@ -249,22 +247,25 @@ def evaluate_execution_progress(self, execution_id, _log=None, handler_module_pa
execution_id=execution_id, reason=_log
)
else:
if is_last_dataset:
if _log and 'ErrorDetail' in _log:
self.set_as_failed(
execution_id=execution_id, reason=_log
)
else:
logger.info(
f"Execution with ID {execution_id} is completed. All tasks are done"
)
self._last_step(execution_id, handler_module_path)
self.set_as_completed(execution_id)
self._evaluate_last_dataset(is_last_dataset, _log, execution_id, handler_module_path)

def _evaluate_last_dataset(self, is_last_dataset, _log, execution_id, handler_module_path):
    """Finalize an execution once its last dataset has been processed.

    If this is not the last dataset, log that the execution is still in
    progress and return. Otherwise, mark the execution as failed when the
    collected log carries an ``ErrorDetail`` marker; on success, run the
    handler's last step and mark the execution as completed.
    """
    # Guard clause: nothing to finalize until the last dataset arrives.
    if not is_last_dataset:
        logger.info(
            f"Execution progress with id {execution_id} is not finished yet, continuing"
        )
        return

    # An 'ErrorDetail' token in the accumulated log signals a failure
    # reported by one of the tasks — propagate it as the failure reason.
    failed = bool(_log) and 'ErrorDetail' in _log
    if failed:
        self.set_as_failed(
            execution_id=execution_id, reason=_log
        )
        return

    logger.info(
        f"Execution with ID {execution_id} is completed. All tasks are done"
    )
    self._last_step(execution_id, handler_module_path)
    self.set_as_completed(execution_id)

def create_execution_request(
self,
Expand Down