diff --git a/.pep8speaks.yml b/.pep8speaks.yml
new file mode 100644
index 0000000..1a35e80
--- /dev/null
+++ b/.pep8speaks.yml
@@ -0,0 +1,28 @@
+# File : .pep8speaks.yml
+
+scanner:
+    diff_only: True  # If False, the entire file touched by the Pull Request is scanned for errors. If True, only the diff is scanned.
+    linter: pycodestyle  # Other option is flake8
+
+pycodestyle:  # Same as scanner.linter value. Other option is flake8
+    max-line-length: 110  # Default is 79 in PEP 8
+    ignore:  # Errors and warnings to ignore
+        - W504  # line break after binary operator
+        - E402  # module level import not at top of file
+        - E731  # do not assign a lambda expression, use a def
+        - C406  # Unnecessary list literal - rewrite as a dict literal.
+        - E741  # ambiguous variable name
+
+no_blank_comment: True  # If True, no comment is made on PR without any errors.
+descending_issues_order: False  # If True, PEP 8 issues in message will be displayed in descending order of line numbers in the file
+
+message:  # Customize the comment made by the bot
+    opened:  # Messages when a new PR is submitted
+        header: "Hello @{name}! Thanks for opening this PR. "
+        # The keyword {name} is converted into the author's username
+        footer: "Do see the [Hitchhiker's guide to code style](https://goo.gl/hqbW4r)"
+        # The messages can be written as they would over GitHub
+    updated:  # Messages when new commits are added to the PR
+        header: "Hello @{name}! Thanks for updating this PR. "
+        footer: ""  # Why comment the link to the style guide every time? :)
+    no_errors: "There are currently no PEP 8 issues detected in this Pull Request. Cheers! :beers: "
diff --git a/.travis.yml b/.travis.yml
index 0ad21ad..0966163 100755
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,9 +18,9 @@ sudo: false
 python:
   - 2.7
 #  - 3.4  # will be deprecated for pandas
-  - 3.5
   - 3.6
   - 3.7
+  - 3.8
 
 # See http://docs.travis-ci.com/user/caching/#pip-cache
 cache: pip
diff --git a/MANIFEST.in b/MANIFEST.in
index 8e87ab1..423b933 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -32,6 +32,8 @@ recursive-include figures *.png
 
 # Include the sample data
 recursive-include data_images *.yml *.yaml
+recursive-include assets *.png
+
 prune .git
 prune venv
 prune build
diff --git a/README.md b/README.md
index 788929b..398f975 100755
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
 
 We present a final step of image processing pipeline which accepts a large number of images, containing spatial expression information for thousands of genes in Drosophila imaginal discs. We assume that the gene activations are binary and can be expressed as a union of a small set of non-overlapping spatial patterns, yielding a compact representation of the spatial activation of each gene. This lends itself well to further automatic analysis, with the hope of discovering new biological relationships. Traditionally, the images were labelled manually, which was very time-consuming. The key part of our work is a binary pattern dictionary learning algorithm, that takes a set of binary images and determines a set of patterns, which can be used to represent the input images with a small error.
 
-![schema](figures/pipeline_schema.png)
+![schema](assets/pipeline_schema.png)
 
 For the image segmentation and individual object detection, we used [Image segmentation toolbox](https://borda.github.io/pyImSegm/).
@@ -86,13 +86,13 @@ python experiments/run_dataset_generate.py \
 ```
 
 **Sample atlases**
-![atlases](figures/synth_atlases.png)
+![atlases](assets/synth_atlases.png)
 
 **Sample binary images**
-![binary samples](figures/synth_samples_binary.png)
+![binary samples](assets/synth_samples_binary.png)
 
 **Sample fuzzy images**
-![fuzzy samples](figures/synth_samples_fuzzy.png)
+![fuzzy samples](assets/synth_samples_fuzzy.png)
 
 For adding Gaussian noise with given sigmas use following script:
 ```bash
@@ -101,7 +101,7 @@ python experiments/run_dataset_add_noise.py \
     -d apdDataset_vX --sigma 0.01 0.1 0.2
 ```
 
-![gauss noise](figures/synth_gauss-noise.png)
+![gauss noise](assets/synth_gauss-noise.png)
 
 ### Real images
 
@@ -226,7 +226,7 @@ python experiments/run_reconstruction.py \
     --nb_workers 1 --visual
 ```
 
-![reconstruction](figures/reconst_imag-disc.png)
+![reconstruction](assets/reconst_imag-disc.png)
 
 ### Aggregating results
diff --git a/figures/pipeline_schema.png b/assets/pipeline_schema.png
old mode 100755
new mode 100644
similarity index 100%
rename from figures/pipeline_schema.png
rename to assets/pipeline_schema.png
diff --git a/figures/reconst_imag-disc.png b/assets/reconst_imag-disc.png
old mode 100755
new mode 100644
similarity index 100%
rename from figures/reconst_imag-disc.png
rename to assets/reconst_imag-disc.png
diff --git a/figures/synth_atlases.png b/assets/synth_atlases.png
similarity index 100%
rename from figures/synth_atlases.png
rename to assets/synth_atlases.png
diff --git a/figures/synth_gauss-noise.png b/assets/synth_gauss-noise.png
similarity index 100%
rename from figures/synth_gauss-noise.png
rename to assets/synth_gauss-noise.png
diff --git a/figures/synth_samples_binary.png b/assets/synth_samples_binary.png
similarity index 100%
rename from figures/synth_samples_binary.png
rename to assets/synth_samples_binary.png
diff --git a/figures/synth_samples_fuzzy.png b/assets/synth_samples_fuzzy.png
similarity index 100%
rename from figures/synth_samples_fuzzy.png
rename to assets/synth_samples_fuzzy.png
diff --git a/bpdl/data_utils.py b/bpdl/data_utils.py
index ae6f459..506af7f 100755
--- a/bpdl/data_utils.py
+++ b/bpdl/data_utils.py
@@ -1,20 +1,21 @@
 """
 The basic module for generating synthetic images and also loading / exporting
 
-Copyright (C) 2015-2018 Jiri Borovec
+Copyright (C) 2015-2020 Jiri Borovec
 """
 from __future__ import absolute_import
 
-import os
 import glob
-import logging
 # import warnings
 import itertools
+import logging
 import multiprocessing as mproc
+import os
 from functools import partial
 
 # to suppress all visual, has to be on the beginning
 import matplotlib
+
 if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
     print('No display found. Using non-interactive Agg backend.')
     # https://matplotlib.org/faq/usage_faq.html
@@ -31,7 +32,7 @@
 from imsegm.utilities.experiments import WrapExecuteSequence
 from imsegm.utilities.data_io import io_imread, io_imsave
 
-from .utilities import create_clean_folder
+from bpdl.utilities import create_clean_folder
 
 NB_WORKERS = mproc.cpu_count()
 IMAGE_SIZE_2D = (128, 128)
@@ -154,7 +155,7 @@ def image_deform_elastic(im, coef=0.5, grid_size=(20, 20), rand_seed=None):
     >>> img = np.zeros((10, 15), dtype=int)
     >>> img[2:8, 3:7] = 1
     >>> img[6:, 9:] = 2
-    >>> image_deform_elastic(img, coef=0.3, grid_size=(5, 5), rand_seed=0)
+    >>> image_deform_elastic(img, coef=0.3, grid_size=(2, 2), rand_seed=0)
     array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
@@ -164,7 +165,7 @@
            [0, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 0],
           [0, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 0],
-           [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2]], dtype=uint8)
+           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
     >>> img = np.zeros((10, 15, 5), dtype=int)
     >>> img[2:8, 3:7, :] = 1
     >>> im = image_deform_elastic(img, coef=0.2, grid_size=(4, 5), rand_seed=0)
@@ -734,19 +735,19 @@ def export_image(path_out, img, im_name, name_template=SEGM_PATTERN,
     (5, 10)
     >>> np.round(im.astype(float), 1).tolist()  # doctest: +NORMALIZE_WHITESPACE
     [[0.6, 0.7, 0.6, 0.6, 0.4, 0.7, 0.4, 0.9, 1.0, 0.4],
-    [0.8, 0.5, 0.6, 0.9, 0.1, 0.1, 0.0, 0.8, 0.8, 0.9],
-    [1.0, 0.8, 0.5, 0.8, 0.1, 0.7, 0.1, 1.0, 0.5, 0.4],
-    [0.3, 0.8, 0.5, 0.6, 0.0, 0.6, 0.6, 0.6, 1.0, 0.7],
-    [0.4, 0.4, 0.7, 0.1, 0.7, 0.7, 0.2, 0.1, 0.3, 0.4]]
+     [0.8, 0.5, 0.6, 0.9, 0.1, 0.1, 0.0, 0.8, 0.8, 0.9],
+     [1.0, 0.8, 0.5, 0.8, 0.1, 0.7, 0.1, 1.0, 0.5, 0.4],
+     [0.3, 0.8, 0.5, 0.6, 0.0, 0.6, 0.6, 0.6, 1.0, 0.7],
+     [0.4, 0.4, 0.7, 0.1, 0.7, 0.7, 0.2, 0.1, 0.3, 0.4]]
     >>> img = np.random.randint(0, 9, [5, 10])
     >>> path_img = export_image('.', img, 'testing-image', stretch_range=False)
     >>> name, im = load_image(path_img, fuzzy_val=False)
     >>> im.tolist()  # doctest: +NORMALIZE_WHITESPACE
     [[4, 4, 6, 4, 4, 3, 4, 4, 8, 4],
-    [3, 7, 5, 5, 0, 1, 5, 3, 0, 5],
-    [0, 1, 2, 4, 2, 0, 3, 2, 0, 7],
-    [5, 0, 2, 7, 2, 2, 3, 3, 2, 3],
-    [4, 1, 2, 1, 4, 6, 8, 2, 3, 0]]
+     [3, 7, 5, 5, 0, 1, 5, 3, 0, 5],
+     [0, 1, 2, 4, 2, 0, 3, 2, 0, 7],
+     [5, 0, 2, 7, 2, 2, 3, 3, 2, 3],
+     [4, 1, 2, 1, 4, 6, 8, 2, 3, 0]]
     >>> os.remove(path_img)
 
     Image - TIFF
@@ -1135,7 +1136,7 @@ def dataset_load_weights(path_base, name_csv=CSV_NAME_WEIGHTS, img_names=None):
         encoding = np.array([[int(x) for x in c.split(';')] for c in coding])
     # the new encoding with pattern names
     else:
-        encoding = df.as_matrix()
+        encoding = df.values
 
     return np.array(encoding)
diff --git a/bpdl/dictionary_learning.py b/bpdl/dictionary_learning.py
index dc58004..34ce97a 100755
--- a/bpdl/dictionary_learning.py
+++ b/bpdl/dictionary_learning.py
@@ -2,16 +2,17 @@
 The main module for Atomic pattern dictionary, joining the atlas estimation
 and computing the encoding / weights
 
-Copyright (C) 2015-2018 Jiri Borovec
+Copyright (C) 2015-2020 Jiri Borovec
 """
 from __future__ import absolute_import
 
+import logging
 import os
 import time
-import logging
 
 # to suppress all visual, has to be on the beginning
 import matplotlib
+
 if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
     print('No display found. Using non-interactive Agg backend.')
     # https://matplotlib.org/faq/usage_faq.html
@@ -25,14 +26,14 @@
 # using https://github.com/Borda/pyGCO
 from gco import cut_general_graph, cut_grid_graph_simple
 
-from .pattern_atlas import (
+from bpdl.pattern_atlas import (
     compute_positive_cost_images_weights, edges_in_image2d_plane,
     init_atlas_mosaic, atlas_split_indep_ptn, reinit_atlas_likely_patterns,
     compute_relative_penalty_images_weights)
-from .pattern_weights import (
+from bpdl.pattern_weights import (
     weights_image_atlas_overlap_major, weights_image_atlas_overlap_partial)
-from .metric_similarity import compare_atlas_adjusted_rand
-from .data_utils import export_image
-from .registration import register_images_to_atlas_demons
+from bpdl.metric_similarity import compare_atlas_adjusted_rand
+from bpdl.data_utils import export_image
+from bpdl.registration import register_images_to_atlas_demons
 
 NB_GRAPH_CUT_ITER = 5
 TEMPLATE_NAME_ATLAS = 'BPDL_{}_{}_iter_{:04d}'
diff --git a/bpdl/metric_similarity.py b/bpdl/metric_similarity.py
index 6eacb18..575d1fc 100755
--- a/bpdl/metric_similarity.py
+++ b/bpdl/metric_similarity.py
@@ -1,7 +1,7 @@
 """
 Introducing some used similarity measures for atlases etc.
 
-Copyright (C) 2015-2018 Jiri Borovec
+Copyright (C) 2015-2020 Jiri Borovec
 """
 # from __future__ import absolute_import
diff --git a/bpdl/pattern_atlas.py b/bpdl/pattern_atlas.py
index 1767aff..8731021 100755
--- a/bpdl/pattern_atlas.py
+++ b/bpdl/pattern_atlas.py
@@ -1,19 +1,19 @@
 """
 Estimating the pattern dictionary module
 
-Copyright (C) 2015-2018 Jiri Borovec
+Copyright (C) 2015-2020 Jiri Borovec
 """
 # from __future__ import absolute_import
 import logging
 
 # import numba
 import numpy as np
-from sklearn.decomposition import SparsePCA, FastICA, DictionaryLearning, NMF
-from skimage import morphology, measure, segmentation, filters
 from scipy import ndimage as ndi
+from skimage import morphology, measure, segmentation, filters
+from sklearn.decomposition import SparsePCA, FastICA, DictionaryLearning, NMF
 
-from .data_utils import image_deform_elastic, extract_image_largest_element
-from .pattern_weights import (
+from bpdl.data_utils import image_deform_elastic, extract_image_largest_element
+from bpdl.pattern_weights import (
     weights_label_atlas_overlap_threshold, convert_weights_binary2indexes)
 
 REINIT_PATTERN_COMPACT = True
@@ -403,7 +403,7 @@ def init_atlas_sparse_pca(imgs, nb_patterns, nb_iter=5, bg_threshold=0.1):
     >>> atlas[3:7, 6:12] = 2
     >>> luts = np.array([[0, 1, 0]] * 99 + [[0, 0, 1]] * 99 + [[0, 1, 1]] * 99)
     >>> imgs = [lut[atlas] for lut in luts]
-    >>> init_atlas_sparse_pca(imgs, 2)
+    >>> init_atlas_sparse_pca(imgs, 2, bg_threshold=0.05)
     array([[0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0],
diff --git a/bpdl/pattern_weights.py b/bpdl/pattern_weights.py
index 6dd120a..fd42e50 100755
--- a/bpdl/pattern_weights.py
+++ b/bpdl/pattern_weights.py
@@ -1,7 +1,7 @@
 """
 Estimating pattern weight vector for each image
 
-Copyright (C) 2015-2018 Jiri Borovec
+Copyright (C) 2015-2020 Jiri Borovec
 """
 # from __future__ import absolute_import
diff --git a/bpdl/registration.py b/bpdl/registration.py
index ae0c979..2ef91da 100644
--- a/bpdl/registration.py
+++ b/bpdl/registration.py
@@ -5,21 +5,21 @@
 * http://insightsoftwareconsortium.github.io/SimpleITK-Notebooks/
 * https://bic-berkeley.github.io/psych-214-fall-2016/dipy_registration.html
 
-Copyright (C) 2017-2018 Jiri Borovec
+Copyright (C) 2017-2020 Jiri Borovec
 """
-import time
logging +import time # import multiprocessing as mproc from functools import partial import numpy as np -from scipy import ndimage, interpolate # from scipy.ndimage import filters from dipy.align import VerbosityLevels from dipy.align.imwarp import SymmetricDiffeomorphicRegistration, DiffeomorphicMap from dipy.align.metrics import SSDMetric from imsegm.utilities.experiments import WrapExecuteSequence, nb_workers +from scipy import ndimage, interpolate NB_WORKERS = nb_workers(0.8) @@ -93,7 +93,6 @@ def register_demons_sym_diffeom(img_sense, img_ref, smooth_sigma=1., [ 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.], [ 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.], [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> np.round(img_warp - img_sense, 1) # doctest: +SKIP >>> img_sense = np.zeros(img_ref.shape, dtype=int) >>> img_sense[4:9, 3:10] = 1 >>> img_sense diff --git a/bpdl/utilities.py b/bpdl/utilities.py index 8555bb1..3d59967 100755 --- a/bpdl/utilities.py +++ b/bpdl/utilities.py @@ -1,24 +1,26 @@ """ The basic module for generating synthetic images and also loading / exporting -Copyright (C) 2015-2018 Jiri Borovec +Copyright (C) 2015-2020 Jiri Borovec """ +import logging # from __future__ import absolute_import import os import re -import types -import logging import shutil -# import multiprocessing.pool -# import multiprocessing as mproc -# from functools import wraps +import types import numpy as np from scipy import stats from scipy.spatial import distance +# import multiprocessing.pool +# import multiprocessing as mproc +# from functools import wraps + + # def update_path(path_file, lim_depth=5, absolute=True): # """ bubble in the folder tree up intil it found desired file # otherwise return original one diff --git a/circle.yml b/circle.yml index 2579f11..8ea589e 100755 --- a/circle.yml +++ b/circle.yml @@ -20,14 +20,20 @@ references: test_coverage: &test_coverage run: - name: Testing and Formating + name: Testing and Coverage command: | unset DISPLAY python --version ; pip --version ; pwd ; ls -l python setup.py check -m -s mkdir results && mkdir test-reports - coverage run --source=bpdl,experiments -m py.test bpdl experiments -v --doctest-modules --junitxml=test-reports/pytest_junit.xml - flake8 . --ignore=E402,E731 --max-line-length=100 + coverage run --source=bpdl,experiments -m pytest bpdl experiments -v --doctest-modules --junitxml=test-reports/pytest_junit.xml + + formatting: &formatting + run: + name: Formating + command: | + pip install flake8 + flake8 . 
 
   dataset: &dataset
     run:
@@ -39,13 +45,16 @@ references:
        python experiments/run_dataset_add_noise.py -p ./data_images
 
   make_docs: &make_docs
-   run:
-     name: Make Documentation
-     command: |
-       sudo apt-get install pandoc
-       sudo pip install -r docs/requirements.txt
-       # sphinx-apidoc -o ./docs/source ./imsegm **/tests/* --force --follow-links
-       cd docs; make html
+    run:
+      name: Make Documentation
+      command: |
+        sudo apt-get install pandoc
+        sudo pip install -r docs/requirements.txt
+        # sphinx-apidoc -o ./docs/source ./imsegm **/test_* --force --follow-links
+        #python setup.py build_ext --inplace
+        cd docs
+        make html --debug --jobs 2 SPHINXOPTS="-W"
+        make latexpdf
 
   expt_pre: &expt_pre
     run:
@@ -73,6 +82,13 @@ references:
        python experiments/run_parse_experiments_result.py -i ./results -r results_NEW.csv
 
 jobs:
+  Formatting:
+    docker:
+      - image: circleci/python:3.6
+    steps: &steps_test
+      - checkout
+      - *formatting
+
   Py3-Tests:
     docker:
       - image: circleci/python:3.6
@@ -84,7 +100,6 @@ jobs:
       - *dataset
       - *test_coverage
-      - *make_docs  # PASSING
 
       - run:
          name: Finalise
@@ -121,6 +136,21 @@ jobs:
       - image: circleci/python:2.7
     steps: *steps_expt
 
+  Build-Docs:
+    docker:
+      - image: circleci/python:3.6
+    steps:
+      - checkout
+      - run:
+          name: TexLive
+          command: |
+            sudo apt-get update -qq
+            sudo apt-get install -y imagemagick ghostscript latexmk texlive texlive-latex-recommended texlive-fonts-recommended texlive-formats-extra
+      # INSTALLATION
+      - *install_pips
+      # DOCUMENTATION
+      - *make_docs
+
 workflows:
   version: 2
   build:
@@ -129,3 +159,5 @@ workflows:
       - Py3-Tests
       - Py2-Experiments
       - Py3-Experiments
+      - Build-Docs
+      - Formatting
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 392d121..01e05d6 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,6 +1,7 @@
-sphinx>=1.4
+sphinx >= 2.0, < 3.0
 recommonmark  # fails with badges
 m2r  # fails with multi-line text
 nbsphinx
 pandoc
-docutils<0.15  # higher version breaks py2
\ No newline at end of file
+docutils < 0.15  # higher version breaks py2
+ipython
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 39facef..df8d7ff 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -17,50 +17,64 @@
 import glob
 import shutil
 import inspect
+import re
 
 import m2r
 
-PATH_ROOT = os.path.join('..', '..')
+PATH_UP = os.path.join('..', '..')
 PATH_HERE = os.path.abspath(os.path.dirname(__file__))
+PATH_ROOT = os.path.abspath(os.path.join(PATH_HERE, PATH_UP))
 sys.path.insert(0, os.path.abspath(PATH_ROOT))
 
 import bpdl  # noqa: E402
 
+# -- Project information -----------------------------------------------------
+
+project = 'BPDL'
+copyright = bpdl.__copyright__
+author = bpdl.__author__
+
+# The short X.Y version
+version = bpdl.__version__
+# The full version, including alpha/beta/rc tags
+release = bpdl.__version__
+
+# Options for the linkcode extension
+# ----------------------------------
+github_user = 'Borda'
+github_repo = 'pyBPDL'
+
 # -- Project documents -------------------------------------------------------
 
 # export the documentation
 with open('intro.rst', 'w') as fp:
     intro = bpdl.__long_doc__.replace(os.linesep + ' ', '')
     fp.write(m2r.convert(intro))
-    # fp.write(bpdl.__doc__)
 
 # export the READme
 with open(os.path.join(PATH_ROOT, 'README.md'), 'r') as fp:
     readme = fp.read()
 
 # replace all paths to relative
-for ndir in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*'))
-             if os.path.isdir(p)):
-    readme = readme.replace('](%s/' % ndir, '](%s/%s/' % (PATH_ROOT, ndir))
+readme = readme.replace('](docs/source/', '](')
+# Todo: this seems to replace only once per line
+readme = re.sub(r' \[(.*)\]\((?!http)(.*)\)',
+                r' [\1](https://github.com/%s/%s/blob/master/\2)' % (github_user, github_repo),
+                readme)
+# TODO: temp fix removing SVG badges and GIF, because PDF cannot show them
+readme = re.sub(r'(\[!\[.*\))', '', readme)
+readme = re.sub(r'(!\[.*.gif\))', '', readme)
+for dir_name in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*'))
+                 if os.path.isdir(p)):
+    readme = readme.replace('](%s/' % dir_name, '](%s/%s/' % (PATH_UP, dir_name))
 with open('readme.md', 'w') as fp:
     fp.write(readme)
 
-# -- Project information -----------------------------------------------------
-
-project = 'BPDL'
-copyright = bpdl.__copyright__
-author = bpdl.__author__
-
-# The short X.Y version
-version = bpdl.__version__
-# The full version, including alpha/beta/rc tags
-release = bpdl.__version__
-
 # -- General configuration ---------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.4'
+needs_sphinx = '2.4'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -75,6 +89,7 @@
     'sphinx.ext.linkcode',
     'sphinx.ext.napoleon',
     'sphinx.ext.autosummary',
+    # 'sphinxcontrib.rsvgconverter'
     'recommonmark',
     # 'm2r',
     'nbsphinx',
@@ -109,7 +124,14 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['*.run_*', '*.show_*', '*.test_*']
+exclude_patterns = [
+    '*.run_*',
+    '*.show_*',
+    '*.test_*',
+    'api/modules.rst',
+    '*/overview_ovary_user-*.ipynb',
+    '*/regist-image-ptn_itk_*.ipynb',
+]
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = None
@@ -131,7 +153,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = []  # '_static'
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -237,8 +259,13 @@ def run_apidoc(_):
     for pkg in PACKAGES:
-        argv = ['-e', '-o', PATH_HERE, os.path.join(PATH_HERE, PATH_ROOT, pkg),
-                'run_*', 'show_*', 'test_*', '--force']
+        argv = ['-e',
+                '-o', os.path.join(PATH_HERE, 'api'),
+                os.path.join(PATH_ROOT, pkg),
+                os.path.join(PATH_ROOT, 'experiments', 'run_*'),
+                os.path.join(PATH_ROOT, 'experiments', 'show_*'),
+                os.path.join(PATH_ROOT, 'experiments', 'test_*'),
+                '--force']
         try:  # Sphinx 1.7+
             from sphinx.ext import apidoc
@@ -280,13 +307,7 @@ def setup(app):
     MOCK_MODULES.append(pkg.rstrip())
 
 # TODO: better parse from package since the import name and package name may differ
-autodoc_mock_imports = MOCK_MODULES + ['yaml', 'sklearn', 'skimage', 'gco', 'imsegm']
-
-
-# Options for the linkcode extension
-# ----------------------------------
-github_user = 'Borda'
-github_repo = 'pyBPDL'
+autodoc_mock_imports = MOCK_MODULES + ['yaml', 'sklearn', 'skimage', 'gco']  # , 'imsegm'
 
 
 # Resolve function
@@ -300,7 +321,7 @@ def find_source():
         obj = getattr(obj, part)
     fname = inspect.getsourcefile(obj)
     # https://github.com/rtfd/readthedocs.org/issues/5735
-    if any([s in fname for s in ('readthedocs', 'checkouts')]):
+    if any([s in fname for s in ('readthedocs', 'rtfd', 'checkouts')]):
         path_top = os.path.abspath(os.path.join('..', '..', '..'))
         fname = os.path.relpath(fname, start=path_top)
     else:
@@ -318,6 +339,10 @@
     # import subprocess
     # tag = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,
     #                        universal_newlines=True).communicate()[0][:-1]
+    branch = filename.split('/')[0]
+    # do mapping from latest tags to master
+    branch = {'latest': 'master', 'stable': 'master'}.get(branch, branch)
+    filename = '/'.join([branch] + filename.split('/')[1:])
     return "https://github.com/%s/%s/blob/%s" \
            % (github_user, github_repo, filename)
diff --git a/docs/source/index.rst b/docs/source/index.rst
index c40aa45..b6086dc 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -8,8 +8,8 @@ Contents
    :maxdepth: 2
 
    readme
-   bpdl
-   experiments
+   api/bpdl
+   api/experiments
    examples
 
 Indices and tables
diff --git a/experiments/__init__.py b/experiments/__init__.py
index 17723f9..5f7bdc0 100755
--- a/experiments/__init__.py
+++ b/experiments/__init__.py
@@ -1,3 +1,3 @@
 import bpdl
 
-bpdl
+bpdl  # to utilize the same init sequence
diff --git a/experiments/experiment_general.py b/experiments/experiment_general.py
index 91cb575..496d26b 100755
--- a/experiments/experiment_general.py
+++ b/experiments/experiment_general.py
@@ -17,11 +17,6 @@
 import collections
 import multiprocessing as mproc
 
-import matplotlib
-if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
-    print('No display found. Using non-interactive Agg backend.')
-    matplotlib.use('Agg')
-
 import numpy as np
 import pandas as pd
 from sklearn import metrics
@@ -198,7 +193,7 @@ def parse_arg_params(parser):
 
     if 'nb_patterns' in args:
         if is_list_like(args['nb_patterns']):
-            args.update({'nb_labels': [l + 1 for l in args['nb_patterns']]})
+            args.update({'nb_labels': [lb + 1 for lb in args['nb_patterns']]})
         else:
             args['nb_labels'] = args['nb_patterns'] + 1
         del args['nb_patterns']
@@ -245,7 +240,7 @@ def load_list_img_names(path_csv, path_in=''):
     assert os.path.exists(path_csv), '%s' % path_csv
     df = pd.read_csv(path_csv, index_col=False, header=None)
     assert len(df.columns) == 1, 'assume just single column'
-    list_names = df.as_matrix()[:, 0].tolist()
+    list_names = df.values[:, 0].tolist()
     # if the input path was set and the list are just names, no complete paths
     if os.path.exists(path_in) and not all(os.path.exists(p) for p in list_names):
         # to each image name add the input path
diff --git a/experiments/experiment_methods.py b/experiments/experiment_methods.py
index 13287f1..31fc4b7 100755
--- a/experiments/experiment_methods.py
+++ b/experiments/experiment_methods.py
@@ -39,7 +39,7 @@ def estim_atlas_as_argmax(atlas_components, fit_results, force_bg=False,
     :param list(ndarray) fit_results:
     :param float max_bg_ration: reset BG threshold if the background is larger
     :param bool force_bg: force too small components as background
-    :return ndarray : np.array
+    :return ndarray: np.array
     """
     ptn_used = np.sum(np.abs(fit_results), axis=0) > 0
     # filter just used patterns
diff --git a/experiments/run_cut_minimal_images.py b/experiments/run_cut_minimal_images.py
index ca4dfbd..c4a0ffa 100755
--- a/experiments/run_cut_minimal_images.py
+++ b/experiments/run_cut_minimal_images.py
@@ -165,7 +165,7 @@ def main(path_pattern_in, path_out, nb_workers=NB_WORKERS):
     # create partial subset with image pathes
     list_img_paths_partial = [list_img_paths[i::nb_workers * LOAD_SUBSET_COEF]
                               for i in range(nb_workers * LOAD_SUBSET_COEF)]
-    list_img_paths_partial = [l for l in list_img_paths_partial if len(l) > 0]
+    list_img_paths_partial = [ls for ls in list_img_paths_partial if ls]
 
     mean_imgs = list(WrapExecuteSequence(load_mean_image, list_img_paths_partial,
                                          nb_workers=nb_workers,
diff --git a/experiments/run_experiments.py b/experiments/run_experiments.py
index e23cb06..dd8b66b 100755
--- a/experiments/run_experiments.py
+++ b/experiments/run_experiments.py
@@ -35,12 +35,13 @@
 sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
 from bpdl.utilities import is_list_like, is_iterable
-from bpdl.data_utils import (DIR_NAME_DICTIONARY, DEFAULT_NAME_DATASET,
-                             dataset_compose_atlas, find_images, dataset_load_images)
+from bpdl.data_utils import (
+    DIR_NAME_DICTIONARY, DEFAULT_NAME_DATASET,
+    dataset_compose_atlas, find_images, dataset_load_images)
 from bpdl.dictionary_learning import bpdl_pipeline
 from bpdl.pattern_atlas import init_atlas_mosaic
-from experiments.experiment_general import (SYNTH_PARAMS, REAL_PARAMS,
-                                            SYNTH_PATH_APD, parse_params)
+from experiments.experiment_general import (
+    SYNTH_PARAMS, REAL_PARAMS, SYNTH_PATH_APD, parse_params)
 from experiments.experiment_methods import (
     ExperimentSparsePCA, ExperimentFastICA, ExperimentDictLearn, ExperimentNMF,
     ExperimentSpectClust, ExperimentCanICA, ExperimentMSDL, ExperimentBPDL, DICT_ATLAS_INIT)
@@ -64,7 +65,6 @@
     'max_iter': 25,  # 250, 150
 })
 
-REAL_PARAMS = REAL_PARAMS
 REAL_PARAMS.update({
     'method': LIST_METHODS,
     'max_iter': 25,  # 250, 150
diff --git a/notebooks/overview_ovary_user-label-annot.ipynb b/notebooks/overview_ovary_user-label-annot.ipynb
index b0ee1d6..41328b7 100644
--- a/notebooks/overview_ovary_user-label-annot.ipynb
+++ b/notebooks/overview_ovary_user-label-annot.ipynb
@@ -578,7 +578,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Copy sample images grouoed by labels"
+    "## Copy sample images grouped by labels"
    ]
   },
   {
@@ -1118,4 +1118,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 1
-}
+}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 176a70d..9c1e4a6 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,18 @@
 # git+https://github.com/Borda/pyImSegm.git@devel
 https://github.com/Borda/pyImSegm/archive/master.zip#egg=ImSegm
 https://github.com/Borda/pyGCO/archive/master.zip#egg=gco-wrapper>=3.0.3
-numpy>=1.8.2,<1.16.0  # version 1.16 breaks skimage 0.14
-scipy>=0.10.0
-pandas>=0.17.1
-six>=1.7.3
-pillow>=2.1.0
-matplotlib>=2.1.1,<3.0.0  # new version does not support py2
-scikit-learn>=0.18.1
-scikit-image>=0.12.1
-tqdm>=4.7.4,<=4.30  # higher fails ascii for py2
-ipython==4.2.0
-# numba>=0.22.1
-nibabel>=2.1.0
-nilearn>=0.3.1
-dipy>=0.11.0,<0.16.0  # failing for py2 in numpy
-SimpleITK>=1.0.1
\ No newline at end of file
+numpy >= 1.13.3
+scipy >= 1.0
+pandas >= 0.17.1
+six >= 1.7.3
+pillow >= 4.0, < 7  # fail loading JPG images
+matplotlib >= 2.1.1, < 3.0.0  # new version does not support py2
+scikit-learn >= 0.18.1
+scikit-image >= 0.12.1
+tqdm >= 4.7.4, <= 4.30  # higher fails ascii for py2
+ipython == 4.2.0
+# numba >= 0.22.1
+nibabel >= 2.1.0
+nilearn >= 0.3.1
+dipy >= 0.11.0, < 0.16.0  # failing for py2 in numpy
+SimpleITK >= 1.0.1
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index 224a779..557336c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,35 @@
 [metadata]
-description-file = README.md
\ No newline at end of file
+description-file = README.md
+license-file = LICENSE
+requirements-file = requirements.txt
+
+#[unittest]
+#plugins = nose2.plugins.doctests
+
+[flake8]
+# http://flake8.pycqa.org/en/latest/user/configuration.html
+ignore = E402,E731
+max-line-length = 100
+exclude = .tox,*.egg,build,temp
+select = E,W,F
+doctests = True
+verbose = 2
+# max-complexity = 10
+
+[tool:pytest]
+addopts = --doctest-modules
+log_cli = 1
+log_cli_level = CRITICAL
+#log_cli_format = %(message)s
+log_file = pytest.log
+log_file_level = DEBUG
+#log_file_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
+#log_file_date_format=%Y-%m-%d %H:%M:%S
+filterwarnings = ignore::FutureWarning
+
+[pydocstyle]
+convention = pep257
+# D104, D107: Ignore missing docstrings in __init__ files and methods.
+# D202: Ignore a blank line after docstring (collision with Python Black in decorators)
+add-ignore = D104,D107,D202
+max-line-length = 120
\ No newline at end of file