Skip to content

Commit

Permalink
file upload
Browse files Browse the repository at this point in the history
  • Loading branch information
calel02 committed Oct 12, 2023
1 parent 6c4cb02 commit 37d218d
Show file tree
Hide file tree
Showing 18 changed files with 946 additions and 0 deletions.
98 changes: 98 additions & 0 deletions Jenkinsfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
#!/usr/bin/groovy

// Shared pipeline library providing ToxEnvRun, HTMLReport, CoberturaReport,
// SLOCRun/SLOCPublish and JenkinsBuildJob steps used below.
@Library(['github.com/indigo-dc/jenkins-pipeline-library@1.4.0']) _

// URL of the downstream Docker-rebuild job; filled in by the last stage.
def job_result_url = ''

pipeline {
    agent {
        label 'python3.8'
    }

    environment {
        author_name = "Carolin Leluschko"
        author_email = "carolin.leluschko@dfki.de"
        app_name = "litter_assessment_service"
        // Downstream job path, parameterized by the current branch.
        job_location = "Pipeline-as-code/DEEP-OC-org/UC-cleluschko-DEEP-OC-litter_assessment_service/${env.BRANCH_NAME}"
    }

    stages {
        stage('Code fetching') {
            steps {
                checkout scm
            }
        }

        // Run flake8 via tox and publish the warnings, pass or fail.
        stage('Style analysis: PEP8') {
            steps {
                ToxEnvRun('pep8')
            }
            post {
                always {
                    recordIssues(tools: [flake8(pattern: 'flake8.log')])
                }
            }
        }

        // Run tests with coverage, then publish HTML and Cobertura reports.
        stage('Unit testing coverage') {
            steps {
                ToxEnvRun('cover')
                ToxEnvRun('cobertura')
            }
            post {
                success {
                    HTMLReport('cover', 'index.html', 'coverage.py report')
                    CoberturaReport('**/coverage.xml')
                }
            }
        }

        // Source-lines-of-code metrics run on a dedicated 'sloc' agent,
        // which needs its own checkout.
        stage('Metrics gathering') {
            agent {
                label 'sloc'
            }
            steps {
                checkout scm
                SLOCRun()
            }
            post {
                success {
                    SLOCPublish()
                }
            }
        }

        // Bandit security scan; a failing scan downgrades the build to
        // UNSTABLE instead of failing it outright.
        stage('Security scanner') {
            steps {
                ToxEnvRun('bandit-report')
                script {
                    if (currentBuild.result == 'FAILURE') {
                        currentBuild.result = 'UNSTABLE'
                    }
                }
            }
            post {
                always {
                    HTMLReport("/tmp/bandit", 'index.html', 'Bandit report')
                }
            }
        }

        // Trigger the Docker image rebuild job, but only for master/test
        // branches or tagged builds.
        stage("Re-build Docker images") {
            when {
                anyOf {
                    branch 'master'
                    branch 'test'
                    buildingTag()
                }
            }
            steps {
                script {
                    def job_result = JenkinsBuildJob("${env.job_location}")
                    job_result_url = job_result.absoluteUrl
                }
            }
        }

    }
}
10 changes: 10 additions & 0 deletions LICENSE
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@

The MIT License (MIT)
Copyright (c) 2023, Carolin Leluschko

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

34 changes: 34 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# litter_assessment_service
[![Build Status](https://jenkins.indigo-datacloud.eu/buildStatus/icon?job=Pipeline-as-code/DEEP-OC-org/UC-cleluschko-litter_assessment_service/master)](https://jenkins.indigo-datacloud.eu/job/Pipeline-as-code/job/DEEP-OC-org/job/UC-cleluschko-litter_assessment_service/job/master)

Integration of DeepaaS API and litter assessment software

To launch it, first install the package, then run [deepaas](https://github.com/indigo-dc/DEEPaaS):
```bash
git clone https://git.ni.dfki.de/cleluschko/litter_assessment_service
cd litter_assessment_service
pip install -e .
deepaas-run --listen-ip 0.0.0.0
```
The associated Docker container for this module can be found at https://git.ni.dfki.de/cleluschko/DEEP-OC-litter_assessment_service.

## Project structure
```
├── LICENSE <- License file
├── README.md <- The top-level README for developers using this project.
├── requirements.txt <- The requirements file for reproducing the analysis environment, e.g.
│ generated with `pip freeze > requirements.txt`
├── setup.py, setup.cfg <- makes project pip installable (pip install -e .) so
│ litter_assessment_service can be imported
├── litter_assessment_service <- Source code for use in this project.
│ │
│ ├── __init__.py <- Makes litter_assessment_service a Python module
│ │
│ └── api.py <- Main script for the integration with DEEP API
└── Jenkinsfile <- Describes basic Jenkins CI/CD pipeline
```
Empty file.
166 changes: 166 additions & 0 deletions litter_assessment_service/api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
# -*- coding: utf-8 -*-
"""
Functions to integrate your model with the DEEPaaS API.
It's usually good practice to keep this file minimal, only performing the interfacing
tasks. In this way you don't mix your true code with DEEPaaS code and everything is
more modular. That is, if you need to write the predict() function in api.py, you
would import your true predict function and call it from here (with some processing /
postprocessing in between if needed).
For example:
import mycustomfile
def predict(**kwargs):
args = preprocess(kwargs)
resp = mycustomfile.predict(args)
resp = postprocess(resp)
return resp
To start populating this file, take a look at the docs [1] and at a canonical exemplar
module [2].
[1]: https://docs.deep-hybrid-datacloud.eu/
[2]: https://github.com/deephdc/demo_app
"""
import tempfile
import os
import logging
import io
import subprocess
from zipfile import ZipFile
from pathlib import Path
from PIL import Image

import pkg_resources
import numpy as np

from litter_assessment_service.misc import _catch_error
from litter_assessment_service.plotting import ResultPlot
from litter_assessment_service import fields, classification, preprocessing

# Project root: one level above the package directory containing this file.
BASE_DIR = Path(__file__).resolve().parents[1]

# __name__ without quotes: getLogger('__name__') created a logger literally
# named "__name__" instead of one named after this module.
logger = logging.getLogger(__name__)

# Lazily-loaded classification models, populated by warm().
# warm() and predict() use the names ``model_PLD``/``model_PLQ``, so those
# are the sentinels initialized here; the original ``PLD_model``/``PLQ_model``
# were never read, leaving predict() to fail with a NameError if warm() had
# not run.
model_PLD = None
model_PLQ = None

@_catch_error
def get_metadata():
    """
    DO NOT REMOVE - All modules should have a get_metadata() function
    with appropriate keys.

    Parses the installed package's PKG-INFO and returns the recognized
    header fields as a dictionary.
    """
    distributions = list(pkg_resources.find_distributions(str(BASE_DIR), only=True))
    if not distributions:
        raise Exception("No package found.")
    pkg = distributions[0]  # if several, select the first

    # Headers we extract from PKG-INFO (matched case-insensitively).
    wanted_keys = (
        "name",
        "version",
        "summary",
        "home-page",
        "author",
        "author-email",
        "license",
    )

    meta = {}
    for line in pkg.get_metadata_lines("PKG-INFO"):
        lowered = line.lower()  # avoid inconsistency due to letter cases
        for key in wanted_keys:
            if lowered.startswith(key + ":"):
                _, value = line.split(": ", 1)
                meta[key] = value

    return meta

def get_predict_args():
    """Return the arguments that are needed to perform a prediction.

    Returns:
        Dictionary of webargs fields.
    """
    predict_args = fields.PredictArgsSchema().fields
    # %s, not %d: predict_args is a dict of webargs fields, not an integer —
    # the original %d would raise a TypeError when the message is formatted.
    logger.debug("Web arguments: %s", predict_args)

    return predict_args

def get_input_data(data):
    """
    Check the content type of the uploaded data and return a list of
    image names together with the paths to the stored files.

    Args:
        data: uploaded-file object exposing ``content_type``, ``filename``
            (path to the stored temporary file) and ``original_filename``.

    Returns:
        tuple: (list of image names, list of paths to the image files).
    """
    if data.content_type == 'application/zip':
        # Zip uploads are extracted to a fresh temp dir; every extracted
        # entry is treated as one image.
        tmp_input = tempfile.mkdtemp()
        with ZipFile(data.filename, 'r') as zip_file:
            zip_file.extractall(tmp_input)
        image_names = os.listdir(tmp_input)
        # Iterate names directly instead of indexing via range(len(...)).
        image_file = [os.path.join(tmp_input, name) for name in image_names]
    else:
        image_file = [data.filename]
        image_names = [data.original_filename]

    return image_names, image_file

def mount_nextcloud(frompath, topath):
    """
    Copy data between a NextCloud folder and the local machine (either
    direction) using ``rclone``.

    Args:
        frompath: source path or rclone remote.
        topath: destination path or rclone remote.

    Returns:
        tuple: (stdout, stderr) bytes of the rclone process.
    """
    command = ["rclone", "copy", frompath, topath]
    # subprocess.run replaces the manual Popen/communicate pair; an argument
    # list with the default shell=False avoids shell-injection issues.
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return result.stdout, result.stderr

def warm():
    """
    Load the models for detection (PLD) and quantification (PLQ) before
    requests to the API are made.

    Side effects: assigns the loaded models to the module-level globals
    ``model_PLD`` and ``model_PLQ``, which predict() reads.

    NOTE(review): the module initializes sentinels named ``PLD_model`` /
    ``PLQ_model``, but this function (and predict()) use ``model_PLD`` /
    ``model_PLQ`` — the sentinels are never updated; confirm the intended
    names and unify them.
    """
    global model_PLD, model_PLQ
    # Relative paths: assumes the service is started from the repo root —
    # TODO confirm against the deployment setup.
    model_name_PLD = "litter_assessment_service/models/PLD_CNN.h5"
    model_name_PLQ = "litter_assessment_service/models/PLQ_CNN.h5"
    model_PLD = preprocessing.warm(model_name_PLD)
    model_PLQ = preprocessing.warm(model_name_PLQ)

def get_arr_from_bin(image_file):
    """
    Read an image file from disk and decode it into a numpy array.

    Args:
        image_file: path to the image file.

    Returns:
        np.ndarray: decoded image as a uint8 array.
    """
    # Context manager closes the file handle even on error; the original
    # opened the file and never closed it.
    with open(image_file, 'rb') as f:
        raw_bytes = f.read()
    image_or = Image.open(io.BytesIO(raw_bytes))
    return np.array(image_or).astype(np.uint8)

def save_plot(**kwargs):
    """
    Plot classification results and upload the resulting .jpg file to the
    mounted external storage.

    Keyword Args:
        results: classification results to plot.
        type: result-type label used in the output file name.
        output_path: local path prefix for the generated figure.
        to_path: remote (rclone) destination for the upload.
    """
    plot_type = kwargs['type']
    local_file = f'{kwargs["output_path"]}_{plot_type}.jpg'

    figure = ResultPlot(kwargs['results'], plot_type).get_plot()
    figure.savefig(local_file)
    mount_nextcloud(local_file, f'{kwargs["to_path"]}')

@_catch_error
def predict(**kwargs):
    """
    Run inference on the uploaded image(s) and run save_plot() for the
    resulting classifications, uploading the plots to external storage.

    Keyword Args:
        input: uploaded-file object (single image or zip archive).
        PLD_plot: whether to plot/upload the detection (PLD) results.
        PLQ_plot: whether to run and plot quantification (PLQ) results.
    """
    data = kwargs["input"]
    image_names, image_files = get_input_data(data)
    to_path = 'rshare:iMagine_UC1/results'

    with tempfile.TemporaryDirectory() as tmp_dir:
        for name, file in zip(image_names, image_files):
            # os.path.splitext instead of name[:-4]: the slice assumed a
            # 3-character extension and corrupted names like "x.jpeg" or
            # names without an extension.
            base_name = os.path.splitext(name)[0]
            output_path = os.path.join(tmp_dir, base_name)
            if data.content_type == 'application/octet-stream':
                image = get_arr_from_bin(file)
            else:
                image_or = Image.open(file)
                image = np.array(image_or)
            # NOTE(review): the whole ``image_names`` list is passed here,
            # not the current ``name`` — confirm against the classification
            # module's API.
            results_PLD = classification.PLD_result(image, image_names, model_PLD)
            if kwargs["PLD_plot"]:
                save_plot(results=results_PLD, type='PLD',
                          output_path=output_path, to_path=to_path)

            if kwargs["PLQ_plot"]:
                # Quantification reuses the detection confusion matrix.
                results_PLQ = classification.PLQ_result(
                    results_PLD.c_matrix, image, image_names, model_PLQ)
                save_plot(results=results_PLQ, type='PLQ',
                          output_path=output_path, to_path=to_path)
Loading

0 comments on commit 37d218d

Please sign in to comment.