diff --git a/.editorconfig b/.editorconfig
index 0435634..ace4820 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -9,6 +9,9 @@ end_of_line = lf
trim_trailing_whitespace = true
charset = utf-8
+[*.{md,markdown}]
+trim_trailing_whitespace = false
+
[*.{gitattributes,yml,vcxproj,vcxproj.filters,sln,rc,clang-format}]
indent_style = space
diff --git a/README.md b/README.md
index 7745ef1..b06d287 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,127 @@
# poxy
Documentation generator for C++ based on Doxygen and [mosra/m.css](https://mcss.mosra.cz/).
-```
+ - [Overview](#overview)
+ - [Example](#example)
+ - [Installation](#installation)
+ - [Usage](#usage)
+ - [Migrating from Doxygen](#migrating-from-doxygen)
+ - [Config file options](#config-file-options)
+
+
+
+## Overview
+[mosra/m.css] is a Doxygen-based documentation generator that significantly improves on Doxygen's default output
+by controlling some of Doxygen's more unruly options, supplying its own slick HTML+CSS generation and adding
+a fantastic live search feature. **Poxy** builds upon both by:
+- Moving the configuration out into a TOML file
+- Preprocessing the Doxygen XML to fix a bunch of Doxygen _~~bugs~~_ quirks
+- Postprocessing the generated HTML to improve syntax highlighting and add a few other improvements
+- Allowing source, image and example directories to be recursive or shallow on a per-directory basis
+- Automatically defining C++ language feature macros based on your project's target C++ version
+- Automatically integrating the cppreference.com doxygen tagfile
+- Providing a number of additional built-in doxygen `@alias` commands
+- Giving more control over the HTML inline using square-bracket `[tags][/tags]`
+- Quite a bit more!
+
+
+
+## Example
+The homepage + documentation for [toml++] is built using poxy:
+- homepage: [marzer.github.io/tomlplusplus](https://marzer.github.io/tomlplusplus/)
+- config file: [`poxy.toml`](https://github.com/marzer/tomlplusplus/blob/master/docs/poxy.toml)
+
+
+
+## Installation
+### Prerequisites:
+- Python 3
+- Doxygen (preferably a version from this decade, though most will be OK)
+### Then:
+```sh
pip install poxy
```
+
+
+
+## Usage
+Poxy is a command-line application.
+```
+poxy [-h] [-v] [--dry] [--threads <N>] [--m.css <path>] [--doxygen <path>] [--werror] [config]
+
+Generate fancy C++ documentation.
+
+positional arguments:
+ config a path to a poxy.toml, Doxyfile.mcss, Doxyfile, or a directory containing one/any/all (default: ./)
+
+optional arguments:
+ -h, --help show this help message and exit
+ -v, --verbose enables very noisy diagnostic output
+ --dry does a 'dry run' only, stopping after emitting the effective Doxyfile
+ --threads sets the number of threads to use (default: automatic)
+ --m.css specifies the version of m.css to use (default: uses the bundled one)
+ --doxygen specifies the Doxygen executable to use (default: finds Doxygen on system path)
+ --werror always treats warnings as errors regardless of config file settings
+```
+The basic three-step to using Poxy is similar to Doxygen:
+1. Create your `poxy.toml` (Poxy's answer to the `Doxyfile`)
+2. Invoke Poxy on it: `poxy path/to/poxy.toml` (or simply `poxy` if the cwd contains the config file)
+3. See your HTML documentation in `<cwd>/html`
+
+ℹ️ If there exists a `Doxyfile` or `Doxyfile-mcss` in the same directory as your `poxy.toml` it will be loaded
+first, then the Poxy overrides applied on top of it. Otherwise a 'default' Doxyfile is used as the base.
+
+
+
+## Config file options
+
+For a self-contained `poxy.toml` example to copy and paste from, see [the one used by toml++](https://github.com/marzer/tomlplusplus/blob/master/docs/poxy.toml).
+
+For a full list of options, with full descriptions, schemas and usage examples, see the [Configuration options] wiki page.
+
+
+
+## Migrating from Doxygen
+Generally the relevant `Doxyfile` options will have a corresponding `poxy.toml` option
+(or be replaced by something more specific) so migration is largely a transcription and box-ticking exercise,
+though there are a few gotchas:
+
+#### **⚠️ The majority of Doxygen's options are controlled by Poxy**
+Very few of the configurable options from the Doxyfile remain untouched by Poxy. This is intentional;
+[m.css] is opinionated, and Poxy even more so. There are a few instances where information can flow from a Doxyfile to
+Poxy, but these situations are few, and all are documented explicitly on the [Configuration options] wiki page.
+
+#### **⚠️ All relative input paths are relative to the config file, _not_ CWD**
+This is in contrast to Doxygen, which has all paths be relative to the Doxygen process' current working directory
+regardless of where the Doxyfile was. I've always personally found that to be nothing but a source of error,
+so Poxy does away with it.
+
+#### **⚠️ Output is always emitted to CWD**
+Poxy always emits the output html to `<cwd>/html`. This is largely to simplify the HTML post-process step.
+
+#### **⚠️ Poxy config files are self-contained**
+There is no equivalent to Doxygen's `@INCLUDE`. If your project is structured in such a way that an N-levels-deep
+Doxyfile hierarchy is necessary, Poxy isn't for you.
+
+
+
+## Why the name "Poxy"?
+
+Originally it was simply called "dox", but there's already a C++ documentation project with that name, so I smashed
+"python" and "dox" together and this is what I came up with.
+
+Also "poxy" can be slang for cheap, inferior, poor quality, etc., which I thought was funny.
+
+
+
+## License and Attribution
+This project is published under the terms of the [MIT license](https://github.com/marzer/poxy/blob/main/LICENSE.txt).
+
+Significant credit must go to Vladimír Vondruš ([mosra]) and his amazing [m.css] framework. Poxy bundles a fork of m.css, used per the [MIT/Expat license](https://github.com/mosra/m.css/blob/master/COPYING) (which can also be found in the installed python package).
+
+[m.css]: https://mcss.mosra.cz/documentation/doxygen/
+[mosra]: https://github.com/mosra
+[mosra/m.css]: https://mcss.mosra.cz/documentation/doxygen/
+[toml++]: https://marzer.github.io/tomlplusplus/
+[C++ feature test macros]: https://en.cppreference.com/w/cpp/feature_test
+[Configuration options]: https://github.com/marzer/poxy/wiki/Configuration-options
diff --git a/poxy/doxygen.py b/poxy/doxygen.py
index 6e707b3..9912915 100644
--- a/poxy/doxygen.py
+++ b/poxy/doxygen.py
@@ -76,9 +76,11 @@ def _format_for_doxyfile(val):
class Doxyfile(object):
- def __init__(self, doxyfile_path, cwd=None, logger=None):
+ def __init__(self, doxyfile_path, cwd=None, logger=None, doxygen_path=None, flush_at_exit=True):
self.__logger=logger
self.__dirty=True
+ self.__text = ''
+ self.__autoflush=bool(flush_at_exit)
# the path of the actual doxyfile
self.path = coerce_path(doxyfile_path).resolve()
@@ -87,7 +89,8 @@ def __init__(self, doxyfile_path, cwd=None, logger=None):
self.__cwd = Path.cwd() if cwd is None else coerce_path(cwd).resolve()
assert_existing_directory(self.__cwd)
- self.__text = ''
+ # doxygen itself
+ self.__doxygen = r'doxygen' if doxygen_path is None else coerce_path(doxygen_path)
# read in doxyfile
if self.path.exists():
@@ -100,7 +103,7 @@ def __init__(self, doxyfile_path, cwd=None, logger=None):
else:
log(self.__logger, rf'Warning: doxyfile {self.path} not found! A default one will be generated in-memory.', level=logging.WARNING)
result = subprocess.run(
- r'doxygen -s -g -'.split(),
+ [str(self.__doxygen), r'-s', r'-g', r'-'],
check=True,
capture_output=True,
cwd=self.__cwd,
@@ -118,11 +121,11 @@ def cleanup(self):
if 1:
log(self.__logger, rf'Invoking doxygen to clean doxyfile')
result = subprocess.run(
- r'doxygen -s -u -'.split(),
+ [str(self.__doxygen), r'-s', r'-u', r'-'],
check=True,
capture_output=True,
cwd=self.__cwd,
- encoding='utf-8',
+ encoding=r'utf-8',
input=self.__text
)
self.__text = result.stdout.strip()
@@ -204,5 +207,5 @@ def __enter__(self):
return self
def __exit__(self, type, value, traceback):
- if traceback is None:
+ if traceback is None and self.__autoflush:
self.flush()
diff --git a/poxy/fixers.py b/poxy/fixers.py
index f5f179a..5161085 100644
--- a/poxy/fixers.py
+++ b/poxy/fixers.py
@@ -23,8 +23,21 @@ class CustomTags(object):
'''
Modifies HTML using custom square-bracket [tags].
'''
- __double_tags = re.compile(r"\[\s*(span|div|aside|code|pre|h1|h2|h3|h4|h5|h6|em|strong|b|i|u|li|ul|ol)(.*?)\s*\](.*?)\[\s*/\1\s*\]", re.I | re.S)
- __single_tags = re.compile(r"\[\s*(/?(?:span|div|aside|code|pre|emoji|(?:parent_)?set_name|(?:parent_)?(?:add|remove|set)_class|br|li|ul|ol|(?:html)?entity))(\s+[^\]]+?)?\s*\]", re.I | re.S)
+ __double_tags = re.compile(
+ r'\[\s*('
+ + r'span|div|aside|code|pre|h1|h2|h3|h4|h5|h6|em|strong|b|i|u|li|ul|ol'
+ + r')(.*?)\s*\](.*?)\[\s*/\1\s*\]',
+ re.I | re.S
+ )
+ __single_tags = re.compile(
+ r'\[\s*(/?(?:'
+ + r'img|span|div|aside|code|pre|emoji'
+ + r'|(?:parent_)?set_(?:parent_)?(?:name|class)'
+ + r'|(?:parent_)?(?:add|remove)_(?:parent_)?class'
+ + r'|br|li|ul|ol|(?:html)?entity)'
+ + r')(\s+[^\]]+?)?\s*\]',
+ re.I | re.S
+ )
__allowed_parents = ('dd', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'aside', 'td')
@classmethod
@@ -60,7 +73,10 @@ def __single_tags_substitute(cls, m, out, context):
cp = context.emoji[tag_content][0]
return f'{cp:X};️'
return ''
- elif tag_name in ('add_class', 'remove_class', 'set_class', 'parent_add_class', 'parent_remove_class', 'parent_set_class'):
+ elif tag_name in (
+ r'add_class', r'remove_class', r'set_class',
+ r'parent_add_class', r'parent_remove_class', r'parent_set_class',
+ r'add_parent_class', r'remove_parent_class', r'set_parent_class'):
classes = []
if tag_content:
for s in tag_content.split():
@@ -69,7 +85,7 @@ def __single_tags_substitute(cls, m, out, context):
if classes:
out.append((tag_name, classes))
return ''
- elif tag_name in ('set_name', 'parent_set_name'):
+ elif tag_name in (r'set_name', r'parent_set_name', r'set_parent_name'):
if tag_content:
out.append((tag_name, tag_content))
return ''
@@ -99,25 +115,25 @@ def __call__(self, doc, context):
parent = tag.parent
new_tags = soup.replace_tag(tag, str(replacer))
for i in range(len(replacer)):
- if replacer[i][0].startswith('parent_'):
+ if replacer[i][0].find(r'parent_') != -1:
if parent is None:
continue
- if replacer[i][0] == 'parent_add_class':
+ if replacer[i][0] in (r'parent_add_class', r'add_parent_class'):
soup.add_class(parent, replacer[i][1])
- elif replacer[i][0] == 'parent_remove_class':
+ elif replacer[i][0] in (r'parent_remove_class', r'remove_parent_class'):
soup.remove_class(parent, replacer[i][1])
- elif replacer[i][0] == 'parent_set_class':
+ elif replacer[i][0] in (r'parent_set_class', r'set_parent_class'):
soup.set_class(parent, replacer[i][1])
- elif replacer[i][0] == 'parent_set_name':
+ elif replacer[i][0] in (r'parent_set_name', r'set_parent_name'):
parent.name = replacer[i][1]
elif len(new_tags) == 1 and not isinstance(new_tags[0], soup.NavigableString):
- if replacer[i][0] == 'add_class':
+ if replacer[i][0] == r'add_class':
soup.add_class(new_tags[0], replacer[i][1])
- elif replacer[i][0] == 'remove_class':
+ elif replacer[i][0] == r'remove_class':
soup.remove_class(new_tags[0], replacer[i][1])
- elif replacer[i][0] == 'set_class':
+ elif replacer[i][0] == r'set_class':
soup.set_class(new_tags[0], replacer[i][1])
- elif replacer[i][0] == 'set_name':
+ elif replacer[i][0] == r'set_name':
new_tags[0].name = replacer[i][1]
continue
@@ -247,10 +263,10 @@ def __call__(self, doc, context):
class StripIncludes(object):
'''
- Strips #include based on context.strip_includes.
+ Strips #include based on context.sources.strip_includes.
'''
def __call__(self, doc, context):
- if doc.article is None or not context.strip_includes:
+ if doc.article is None or not context.sources.strip_includes:
return False
changed = False
for include_div in doc.article.find_all(r'div', class_=r'm-doc-include'):
@@ -261,7 +277,7 @@ def __call__(self, doc, context):
if not (text.startswith('<') and text.endswith('>')):
continue
text = text[1:-1].strip()
- for strip in context.strip_includes:
+ for strip in context.sources.strip_includes:
if len(text) < len(strip) or not text.startswith(strip):
continue
if len(text) == len(strip):
@@ -360,7 +376,7 @@ def __colourize_compound_def(cls, tags, context):
assert len(tags) == 1 or tags[-1].string != '::'
full_str = ''.join([tag.get_text() for tag in tags])
- if context.highlighting.enums.fullmatch(full_str):
+ if context.code_blocks.enums.fullmatch(full_str):
soup.set_class(tags[-1], 'ne')
del tags[-1]
while tags and tags[-1].string == '::':
@@ -369,7 +385,7 @@ def __colourize_compound_def(cls, tags, context):
cls.__colourize_compound_def(tags, context)
return True
- if context.highlighting.types.fullmatch(full_str):
+ if context.code_blocks.types.fullmatch(full_str):
soup.set_class(tags[-1], 'ut')
del tags[-1]
while tags and tags[-1].string == '::':
@@ -378,7 +394,7 @@ def __colourize_compound_def(cls, tags, context):
cls.__colourize_compound_def(tags, context)
return True
- while not context.highlighting.namespaces.fullmatch(full_str):
+ while not context.code_blocks.namespaces.fullmatch(full_str):
del tags[-1]
while tags and tags[-1].string == '::':
del tags[-1]
@@ -496,17 +512,17 @@ def __call__(self, doc, context):
or isinstance(prev, soup.NavigableString)
or 'class' not in prev.attrs):
continue
- if ('s' in prev['class'] and context.highlighting.string_literals.fullmatch(span.get_text())):
+ if ('s' in prev['class'] and context.code_blocks.string_literals.fullmatch(span.get_text())):
soup.set_class(span, 'sa')
changed_this_block = True
- elif (prev['class'][0] in ('mf', 'mi', 'mb', 'mh') and context.highlighting.numeric_literals.fullmatch(span.get_text())):
+ elif (prev['class'][0] in ('mf', 'mi', 'mb', 'mh') and context.code_blocks.numeric_literals.fullmatch(span.get_text())):
soup.set_class(span, prev['class'][0])
changed_this_block = True
# preprocessor macros
spans = code_block('span', class_=('n', 'nl', 'kt', 'nc', 'nf'), string=True)
for span in spans:
- if context.highlighting.macros.fullmatch(span.get_text()):
+ if context.code_blocks.macros.fullmatch(span.get_text()):
soup.set_class(span, 'm')
changed_this_block = True
diff --git a/poxy/project.py b/poxy/project.py
index 46f6f94..74b27a8 100644
--- a/poxy/project.py
+++ b/poxy/project.py
@@ -15,10 +15,54 @@
import threading
import json
import datetime
+import shutil
+import tempfile
from schema import Schema, Or, And, Optional
+#=======================================================================================================================
+# schemas
+#=======================================================================================================================
+
+_py2toml = {
+ str : r'string',
+ list : r'array',
+ dict : r'table',
+ int : r'integer',
+ float : r'float',
+ bool : r'boolean',
+ datetime.date : r'date',
+ datetime.time : r'time',
+ datetime.datetime : r'date-time'
+}
+
+
+
+def FixedArrayOf(typ, length, name=''):
+ global _py2toml
+ return And(
+ [typ],
+ lambda v: len(v) == length,
+ error=rf'{name + ": " if name else ""}expected array of {length} {_py2toml[typ]}{"s" if length != 1 else ""}'
+ )
+
+
+
+def ValueOrArray(typ, name='', length=None):
+ global _py2toml
+ if length is None:
+ return Or(typ, [typ], error=rf'{name + ": " if name else ""}expected {_py2toml[typ]} or array of {_py2toml[typ]}s')
+ else:
+ err = rf'{name + ": " if name else ""}expected {_py2toml[typ]} or array of {length} {_py2toml[typ]}{"s" if length != 1 else ""}'
+ return And(
+ Or(typ, [typ], error=err),
+ lambda v: not isinstance(v, list) or len(v) == length,
+ error=err
+ )
+
+
+
#=======================================================================================================================
# internal helpers
#=======================================================================================================================
@@ -610,6 +654,12 @@ def _assert_no_unexpected_keys(raw, validated, prefix=''):
class _Warnings(object):
+ schema = {
+ Optional(r'enabled') : bool,
+ Optional(r'treat_as_errors') : bool,
+ Optional(r'undocumented') : bool,
+ }
+
def __init__(self, config):
self.enabled = None
self.treat_as_errors = None
@@ -617,21 +667,29 @@ def __init__(self, config):
if 'warnings' not in config:
return
- vals = config['warnings']
+ config = config['warnings']
- if 'enabled' in vals:
- self.enabled = bool(vals['enabled'])
+ if 'enabled' in config:
+ self.enabled = bool(config['enabled'])
- if 'treat_as_errors' in vals:
- self.treat_as_errors = bool(vals['treat_as_errors'])
+ if 'treat_as_errors' in config:
+ self.treat_as_errors = bool(config['treat_as_errors'])
- if 'undocumented' in vals:
- self.undocumented = bool(vals['undocumented'])
+ if 'undocumented' in config:
+ self.undocumented = bool(config['undocumented'])
+class _CodeBlocks(object):
+ schema = {
+ Optional(r'types') : ValueOrArray(str, name=r'types'),
+ Optional(r'macros') : ValueOrArray(str, name=r'macros'),
+ Optional(r'string_literals') : ValueOrArray(str, name=r'string_literals'),
+ Optional(r'numeric_literals') : ValueOrArray(str, name=r'numeric_literals'),
+ Optional(r'enums') : ValueOrArray(str, name=r'enums'),
+ Optional(r'namespaces') : ValueOrArray(str, name=r'namespaces'),
+ }
-class _Highlighting(object):
def __init__(self, config, defines):
self.types = copy.deepcopy(_Defaults.types)
self.macros = copy.deepcopy(_Defaults.macros)
@@ -640,41 +698,41 @@ def __init__(self, config, defines):
self.enums = copy.deepcopy(_Defaults.enums)
self.namespaces = copy.deepcopy(_Defaults.namespaces)
- if 'highlighting' in config:
- vals = config['highlighting']
+ if 'code' in config:
+ config = config['code']
- if 'types' in vals:
- for t in coerce_collection(vals['types']):
+ if 'types' in config:
+ for t in coerce_collection(config['types']):
type_ = t.strip()
if type_:
self.types.add(type_)
- if 'macros' in vals:
- for m in coerce_collection(vals['macros']):
+ if 'macros' in config:
+ for m in coerce_collection(config['macros']):
macro = m.strip()
if macro:
self.macros.add(macro)
- if 'string_literals' in vals:
- for lit in coerce_collection(vals['string_literals']):
+ if 'string_literals' in config:
+ for lit in coerce_collection(config['string_literals']):
literal = lit.strip()
if literal:
self.string_literals.add(literal)
- if 'numeric_literals' in vals:
- for lit in coerce_collection(vals['numeric_literals']):
+ if 'numeric_literals' in config:
+ for lit in coerce_collection(config['numeric_literals']):
literal = lit.strip()
if literal:
self.numeric_literals.add(literal)
- if 'enums' in vals:
- for e in coerce_collection(vals['enums']):
+ if 'enums' in config:
+ for e in coerce_collection(config['enums']):
enum = e.strip()
if enum:
self.enums.add(enum)
- if 'namespaces' in vals:
- for ns in coerce_collection(vals['namespaces']):
+ if 'namespaces' in config:
+ for ns in coerce_collection(config['namespaces']):
namespace = ns.strip()
if namespace:
self.namespaces.add(namespace)
@@ -689,45 +747,102 @@ def __init__(self, config, defines):
-#=======================================================================================================================
-# schemas
-#=======================================================================================================================
+class _Inputs(object):
+ schema = {
+ Optional(r'paths') : ValueOrArray(str, name=r'paths'),
+ Optional(r'recursive_paths') : ValueOrArray(str, name=r'recursive_paths'),
+ }
-_py2toml = {
- str : r'string',
- list : r'array',
- dict : r'table',
- int : r'integer',
- float : r'float',
- bool : r'boolean',
- datetime.date : r'date',
- datetime.time : r'time',
- datetime.datetime : r'date-time'
-}
+ def __init__(self, config, key, input_dir):
+ self.paths = []
+ if key not in config:
+ return
+ config = config[key]
+
+ paths = set()
+ for recursive in (False, True):
+ key = r'recursive_paths' if recursive else r'paths'
+ if key in config:
+ for v in coerce_collection(config[key]):
+ path = v.strip()
+ if not path:
+ continue
+ path = Path(path)
+ if not path.is_absolute():
+ path = Path(input_dir, path)
+ path = path.resolve()
+ if not path.exists():
+ raise Exception(rf"{key}: '{path}' does not exist")
+ if not (path.is_file() or path.is_dir()):
+ raise Exception(rf"{key}: '{path}' was not a directory or file")
+ paths.add(str(path))
+ if recursive and path.is_dir():
+ for subdir in enum_subdirs(path):
+ paths.add(str(subdir))
+ self.paths = list(paths)
+ self.paths.sort()
+
+
+
+class _FilteredInputs(_Inputs):
+ schema = combine_dicts(_Inputs.schema, {
+ Optional(r'patterns') : ValueOrArray(str, name=r'patterns')
+ })
+
+ def __init__(self, config, key, input_dir):
+ super().__init__(config, key, input_dir)
+ self.patterns = None
+
+ if key not in config:
+ return
+ config = config[key]
+ if r'patterns' in config:
+ self.patterns = set()
+ for v in coerce_collection(config[r'patterns']):
+ val = v.strip()
+ if val:
+ self.patterns.add(val)
-def FixedArrayOf(typ, length, name=''):
- global _py2toml
- return And(
- [typ],
- lambda v: len(v) == length,
- error=rf'{name + ": " if name else ""}expected array of {length} {_py2toml[typ]}{"s" if length != 1 else ""}'
- )
+class _Sources(_FilteredInputs):
+ schema = combine_dicts(_FilteredInputs.schema, {
+ Optional(r'strip_paths') : ValueOrArray(str, name=r'strip_paths'),
+ Optional(r'strip_includes') : ValueOrArray(str, name=r'strip_includes'),
+ Optional(r'extract_all') : bool,
+ })
+
+ def __init__(self, config, key, input_dir):
+ super().__init__(config, key, input_dir)
+
+ self.strip_paths = []
+ self.strip_includes = []
+ self.extract_all = None
+ if self.patterns is None:
+ self.patterns = copy.deepcopy(_Defaults.source_patterns)
+
+ if key not in config:
+ return
+ config = config[key]
+
+ if r'strip_paths' in config:
+ for s in coerce_collection(config[r'strip_paths']):
+ path = s.strip()
+ if path:
+ self.strip_paths.append(path)
+
+ if r'strip_includes' in config:
+ for s in coerce_collection(config[r'strip_includes']):
+ path = s.strip().replace('\\', '/')
+ if path:
+ self.strip_includes.append(path)
+ self.strip_includes.sort(key = lambda v: len(v), reverse=True)
+
+ if r'extract_all' in config:
+ self.extract_all = bool(config['extract_all'])
-def ValueOrArray(typ, name='', length=None):
- global _py2toml
- if length is None:
- return Or(typ, [typ], error=rf'{name + ": " if name else ""}expected {_py2toml[typ]} or array of {_py2toml[typ]}s')
- else:
- err = rf'{name + ": " if name else ""}expected {_py2toml[typ]} or array of {length} {_py2toml[typ]}{"s" if length != 1 else ""}'
- return And(
- Or(typ, [typ], error=err),
- lambda v: not isinstance(v, list) or len(v) == length,
- error=err
- )
@@ -743,48 +858,32 @@ class Context(object):
__data_files_lock = threading.Lock()
__config_schema = Schema(
{
- Optional(r'name') : str,
+ Optional(r'aliases') : {str : str},
+ Optional(r'autolinks') : {str : str},
+ Optional(r'badges') : {str : FixedArrayOf(str, 2, name=r'badges') },
+ Optional(r'code_blocks') : _CodeBlocks.schema,
+ Optional(r'cpp') : Or(str, int, error=r'cpp: expected string or integer'),
+ Optional(r'defines') : {str : Or(str, int, bool)},
Optional(r'description') : str,
- Optional(r'github') : str,
- Optional(r'logo') : str,
+ Optional(r'examples') : _FilteredInputs.schema,
+ Optional(r'extra_files') : ValueOrArray(str, name=r'extra_files'),
Optional(r'favicon') : str,
- Optional(r'private_repo') : bool,
- Optional(r'show_includes') : bool,
Optional(r'generate_tagfile') : bool,
+ Optional(r'github') : str,
+ Optional(r'images') : _Inputs.schema,
+ Optional(r'implementation_headers') : {str : ValueOrArray(str)},
+ Optional(r'inline_namespaces') : ValueOrArray(str, name=r'inline_namespaces'),
Optional(r'internal_docs') : bool,
- Optional(r'extract_all') : bool,
- Optional(r'cpp') : Or(str, int, error=r'cpp: expected string or integer'),
Optional(r'license') : ValueOrArray(str, length=2, name=r'license'),
- Optional(r'badges') : {str : FixedArrayOf(str, 2, name=r'badges') },
- Optional(r'navbar') : ValueOrArray(str, name=r'navbar'),
- Optional(r'inline_namespaces') : ValueOrArray(str, name=r'inline_namespaces'),
- Optional(r'extra_files') : ValueOrArray(str, name=r'extra_files'),
- Optional(r'strip_paths') : ValueOrArray(str, name=r'strip_paths'),
- Optional(r'strip_includes') : ValueOrArray(str, name=r'strip_includes'),
- Optional(r'sources') : ValueOrArray(str, name=r'sources'),
- Optional(r'recursive_sources') : ValueOrArray(str, name=r'recursive_sources'),
- Optional(r'source_patterns') : ValueOrArray(str, name=r'source_patterns'),
+ Optional(r'logo') : str,
Optional(r'meta_tags') : {str : Or(str, int)},
+ Optional(r'name') : str,
+ Optional(r'navbar') : ValueOrArray(str, name=r'navbar'),
+ Optional(r'private_repo') : bool,
+ Optional(r'show_includes') : bool,
+ Optional(r'sources') : _Sources.schema,
Optional(r'tagfiles') : {str : str},
- Optional(r'defines') : {str : Or(str, int, bool)},
- Optional(r'autolinks') : {str : str},
- Optional(r'aliases') : {str : str},
- Optional(r'implementation_headers') : {str : ValueOrArray(str)},
- Optional(r'warnings') :
- {
- Optional(r'enabled') : bool,
- Optional(r'treat_as_errors') : bool,
- Optional(r'undocumented') : bool,
- },
- Optional(r'highlighting') :
- {
- Optional(r'types') : ValueOrArray(str, name=r'types'),
- Optional(r'macros') : ValueOrArray(str, name=r'macros'),
- Optional(r'string_literals') : ValueOrArray(str, name=r'string_literals'),
- Optional(r'numeric_literals') : ValueOrArray(str, name=r'numeric_literals'),
- Optional(r'enums') : ValueOrArray(str, name=r'enums'),
- Optional(r'namespaces') : ValueOrArray(str, name=r'namespaces'),
- },
+ Optional(r'warnings') : _Warnings.schema,
},
ignore_extra_keys=True
)
@@ -816,8 +915,11 @@ def verbose(self, msg, indent=None):
def info(self, msg, indent=None):
self.__log(logging.INFO, msg, indent=indent)
- def warning(self, msg, indent=None):
- self.__log(logging.WARNING, rf'Warning: {msg}', indent=indent)
+ def warning(self, msg, indent=None, prefix=r'Warning: '):
+ if prefix:
+ self.__log(logging.WARNING, rf'{prefix}{msg}', indent=indent)
+ else:
+ self.__log(logging.WARNING, msg, indent=indent)
def verbose_value(self, name, val):
if not self.__verbose:
@@ -889,7 +991,7 @@ def __init_data_files(cls, context):
finally:
cls.__data_files_lock.release()
- def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir, temp_file_name, logger, dry_run):
+ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir, doxygen_path, logger, dry_run, treat_warnings_as_errors):
self.logger = logger
self.__verbose = bool(verbose)
@@ -899,9 +1001,6 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.cleanup = bool(cleanup)
self.verbose_value(r'Context.cleanup', self.cleanup)
- self.temp_file_name = str(temp_file_name).strip() if temp_file_name is not None else None
- self.verbose_value(r'Context.temp_file_name', self.temp_file_name)
-
threads = int(threads)
if threads <= 0:
threads = os.cpu_count()
@@ -912,6 +1011,8 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.tagfile_path = None
self.warnings = None
+ now = datetime.datetime.utcnow()
+
# resolve paths
if 1:
@@ -922,9 +1023,7 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.verbose_value(r'Context.data_dir', self.data_dir)
if output_dir is None:
output_dir = Path.cwd()
- if not isinstance(output_dir, Path):
- output_dir = Path(str(output_dir))
- self.output_dir = output_dir.resolve()
+ self.output_dir = coerce_path(output_dir).resolve()
self.verbose_value(r'Context.output_dir', self.output_dir)
assert self.output_dir.is_absolute()
@@ -932,11 +1031,11 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
input_dir = None
self.config_path = None
self.doxyfile_path = None
+ self.temp_doxyfile_path = None
if config_path is None:
config_path = self.output_dir
else:
- if not isinstance(config_path, Path):
- config_path = Path(str(config_path))
+ config_path = coerce_path(config_path)
if not config_path.is_absolute():
config_path = Path(self.output_dir, config_path)
config_path = config_path.resolve()
@@ -986,12 +1085,32 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.verbose_value(r'Context.xml_dir', self.xml_dir)
self.verbose_value(r'Context.html_dir', self.html_dir)
+ # doxygen
+ if doxygen_path is not None:
+ doxygen_path = coerce_path(doxygen_path).resolve()
+ if not doxygen_path.exists() and Path(str(doxygen_path) + r'.exe').exists():
+ doxygen_path = Path(str(doxygen_path) + r'.exe')
+ if doxygen_path.is_dir():
+ p = Path(doxygen_path, 'doxygen.exe')
+ if not p.exists() or not p.is_file() or not os.access(str(p), os.X_OK):
+ p = Path(doxygen_path, 'doxygen')
+ if not p.exists() or not p.is_file() or not os.access(str(p), os.X_OK):
+ raise Exception(rf'Could not find Doxygen executable in {doxygen_path}')
+ doxygen_path = p
+ assert_existing_file(doxygen_path)
+ self.doxygen_path = doxygen_path
+ else:
+ self.doxygen_path = shutil.which(r'doxygen')
+ if self.doxygen_path is None:
+ raise Exception(rf'Could not find Doxygen on system path')
+ if not os.access(str(self.doxygen_path), os.X_OK):
+ raise Exception(rf'{doxygen_path} was not an executable file')
+ self.verbose_value(r'Context.doxygen_path', self.doxygen_path)
+
# m.css
if mcss_dir is None:
mcss_dir = Path(self.data_dir, r'mcss')
- if not isinstance(mcss_dir, Path):
- mcss_dir = Path(str(mcss_dir))
- mcss_dir = mcss_dir.resolve()
+ mcss_dir = coerce_path(mcss_dir).resolve()
assert_existing_directory(mcss_dir)
assert_existing_file(Path(mcss_dir, 'documentation/doxygen.py'))
self.mcss_dir = mcss_dir
@@ -1009,6 +1128,8 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
config = _assert_no_unexpected_keys(config, self.__config_schema.validate(config))
self.warnings = _Warnings(config) # printed in run.py post-doxyfile
+ if treat_warnings_as_errors:
+ self.warnings.treat_as_errors = True
# project name (PROJECT_NAME)
self.name = ''
@@ -1061,7 +1182,7 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
# project C++ version
# defaults to 'current' cpp year version based on (current year - 2)
- self.cpp = max(int(datetime.datetime.now().year) - 2, 2011)
+ self.cpp = max(int(now.year) - 2, 2011)
self.cpp = self.cpp - ((self.cpp - 2011) % 3)
if 'cpp' in config:
self.cpp = str(config['cpp']).lstrip('0 \t').rstrip()
@@ -1093,41 +1214,41 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.logo = file.resolve()
self.verbose_value(r'Context.logo', self.logo)
- # sources + recursive_sources (INPUT)
- self.sources = set()
- for recursive in (False, True):
- key = r'recursive_sources' if recursive else r'sources'
- if key in config:
- for v in coerce_collection(config[key]):
- path = v.strip()
- if not path:
- continue
- path = Path(path)
- if not path.is_absolute():
- path = Path(self.input_dir, path)
- path = path.resolve()
- if not path.exists():
- raise Exception(rf"{key}: '{path}' does not exist")
- if not (path.is_file() or path.is_dir()):
- raise Exception(rf"{key}: '{path}' was not a directory or file")
- self.sources.add(str(path))
- if recursive and path.is_dir():
- for subdir in enum_subdirs(path):
- self.sources.add(str(subdir))
- self.sources = list(self.sources)
- self.sources.sort()
- self.verbose_value(r'Context.sources', self.sources)
-
- # sources (FILE_PATTERNS)
- if 'source_patterns' in config:
- self.source_patterns = set()
- for v in coerce_collection(config['source_patterns']):
- val = v.strip()
- if val:
- self.source_patterns.add(val)
- else:
- self.source_patterns = copy.deepcopy(_Defaults.source_patterns)
- self.verbose_value(r'Context.source_patterns', self.source_patterns)
+ # sources (INPUT, FILE_PATTERNS, STRIP_FROM_PATH, STRIP_FROM_INC_PATH, EXTRACT_ALL)
+ self.sources = _Sources(config, 'sources', self.input_dir)
+ self.verbose_object(r'Context.sources', self.sources)
+
+ # images (IMAGE_PATH)
+ self.images = _Inputs(config, 'images', self.input_dir)
+ self.verbose_object(r'Context.images', self.images)
+
+ # examples (EXAMPLES_PATH, EXAMPLE_PATTERNS)
+ self.examples = _FilteredInputs(config, 'examples', self.input_dir)
+ self.verbose_object(r'Context.examples', self.examples)
+
+ # tagfiles (TAGFILES)
+ self.tagfiles = {
+ str(coerce_path(self.data_dir, r'cppreference-doxygen-web.tag.xml')) : r'http://en.cppreference.com/w/'
+ }
+ self.unresolved_tagfiles = False
+ for k,v in _extract_kvps(config, 'tagfiles').items():
+ source = str(k)
+ dest = str(v)
+ if source and dest:
+ if is_uri(source):
+ file = str(Path(tempfile.gettempdir(), rf'poxy.tagfile.{sha1(source)}.{now.year}-{now.isocalendar().week}.xml'))
+ self.tagfiles[source] = (file, dest)
+ self.unresolved_tagfiles = True
+ else:
+ source = Path(source)
+ if not source.is_absolute():
+ source = Path(self.input_dir, source)
+ source = str(source.resolve())
+ self.tagfiles[source] = dest
+ for k, v in self.tagfiles.items():
+ if isinstance(v, str):
+ assert_existing_file(k)
+ self.verbose_value(r'Context.tagfiles', self.tagfiles)
# m.css navbar
if 'navbar' in config:
@@ -1156,15 +1277,6 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.meta_tags['description'] = self.description
self.verbose_value(r'Context.meta_tags', self.meta_tags)
- # tagfiles (TAGFILES)
- self.tagfiles = {}
- for k,v in _extract_kvps(config, 'tagfiles').items():
- self.tagfiles[str(Path(self.input_dir, k).resolve())] = v
- self.tagfiles[str(Path(self.data_dir, r'cppreference-doxygen-web.tag.xml'))] = r'http://en.cppreference.com/w/'
- for k, v in self.tagfiles.items():
- assert_existing_file(k)
- self.verbose_value(r'Context.tagfiles', self.tagfiles)
-
# inline namespaces for old versions of doxygen
self.inline_namespaces = copy.deepcopy(_Defaults.inline_namespaces)
if 'inline_namespaces' in config:
@@ -1199,31 +1311,6 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
self.internal_docs = bool(config['internal_docs'])
self.verbose_value(r'Context.internal_docs', self.internal_docs)
- # strip_paths (STRIP_FROM_PATH)
- self.strip_paths = []
- if r'strip_paths' in config:
- for s in coerce_collection(config['strip_paths']):
- path = s.strip()
- if path:
- self.strip_paths.append(path)
- self.verbose_value(r'Context.strip_paths', self.strip_paths)
-
- # strip_includes (STRIP_FROM_INC_PATH)
- self.strip_includes = []
- if r'strip_includes' in config:
- for s in coerce_collection(config['strip_includes']):
- path = s.strip().replace('\\', '/')
- if path:
- self.strip_includes.append(path)
- self.strip_includes.sort(key = lambda v: len(v), reverse=True)
- self.verbose_value(r'Context.strip_includes', self.strip_includes)
-
- # extract_all (EXTRACT_ALL)
- self.extract_all = None
- if 'extract_all' in config:
- self.extract_all = bool(config['extract_all'])
- self.verbose_value(r'Context.extract_all', self.extract_all)
-
# generate_tagfile (GENERATE_TAGFILE)
self.generate_tagfile = None # not (self.private_repo or self.internal_docs)
if 'generate_tagfile' in config:
@@ -1336,7 +1423,7 @@ def __init__(self, config_path, output_dir, threads, cleanup, verbose, mcss_dir,
raise Exception(rf'extra_files: Multiple source files with the name {f.name}')
extra_filenames.add(f.name)
- self.highlighting = _Highlighting(config, non_cpp_def_defines) # printed in run.py post-xml
+ self.code_blocks = _CodeBlocks(config, non_cpp_def_defines) # printed in run.py post-xml
# initialize other data from files on disk
self.__init_data_files(self)
diff --git a/poxy/run.py b/poxy/run.py
index 179c2de..818302b 100644
--- a/poxy/run.py
+++ b/poxy/run.py
@@ -22,6 +22,7 @@
import concurrent.futures as futures
import argparse
import tempfile
+import requests
from lxml import etree
from io import BytesIO
@@ -44,6 +45,7 @@
(r'DISTRIBUTE_GROUP_DOC', False),
(r'DOXYFILE_ENCODING', r'UTF-8'),
(r'ENABLE_PREPROCESSING', True),
+ (r'EXAMPLE_RECURSIVE', False),
(r'EXCLUDE_SYMLINKS', False),
(r'EXPAND_ONLY_PREDEF', False),
(r'EXTERNAL_GROUPS', False),
@@ -56,6 +58,8 @@
(r'EXTRACT_PRIVATE', False),
(r'EXTRACT_STATIC', False),
(r'FILTER_PATTERNS', None),
+ (r'FILTER_SOURCE_FILES', False),
+ (r'FILTER_SOURCE_PATTERNS', None),
(r'FORCE_LOCAL_INCLUDES', False),
(r'FULL_PATH_NAMES', True),
(r'GENERATE_AUTOGEN_DEF', False),
@@ -144,22 +148,29 @@ def _preprocess_doxyfile(context):
with doxygen.Doxyfile(
doxyfile_path = context.doxyfile_path,
cwd = context.input_dir,
- logger = context.verbose_logger
+ logger = context.verbose_logger,
+ doxygen_path = context.doxygen_path,
+ flush_at_exit = not context.dry_run
) as df:
df.append()
df.append(r'#---------------------------------------------------------------------------')
df.append(r'# marzer/poxy')
- df.append(r'#---------------------------------------------------------------------------')
+ df.append(r'#---------------------------------------------------------------------------', end='\n\n')
# apply regular doxygen settings
if 1:
- df.set_value(r'INPUT', context.sources)
- df.set_value(r'FILE_PATTERNS', context.source_patterns)
- df.set_value(r'OUTPUT_DIRECTORY', context.output_dir)
- df.add_value(r'EXCLUDE', context.html_dir)
- df.add_value(r'EXCLUDE', context.xml_dir)
+ df.append(r'# doxygen default overrides', end='\n\n') # ----------------------------------------
+
+ global _doxygen_overrides
+ for k, v in _doxygen_overrides:
+ df.set_value(k, v)
+
+ df.append()
+ df.append(r'# general config', end='\n\n') # ---------------------------------------------------
+
+ df.set_value(r'OUTPUT_DIRECTORY', context.output_dir)
if not context.name:
context.name = df.get_value(r'PROJECT_NAME', fallback='')
df.set_value(r'PROJECT_NAME', context.name)
@@ -198,10 +209,12 @@ def _preprocess_doxyfile(context):
else:
df.set_value(r'GENERATE_TAGFILE', None)
- if context.extract_all is None:
- context.extract_all = df.get_boolean(r'EXTRACT_ALL', fallback=False)
- context.verbose_value(r'Context.extract_all', context.extract_all)
- df.set_value(r'EXTRACT_ALL', context.extract_all)
+ df.set_value(r'NUM_PROC_THREADS', context.threads)
+ df.add_value(r'CLANG_OPTIONS', rf'-std=c++{context.cpp%100}')
+ df.add_value(r'CLANG_OPTIONS', r'-Wno-everything')
+
+ df.append()
+ df.append(r'# context.warnings', end='\n\n') # ---------------------------------------------------
if context.warnings.enabled is None:
context.warnings.enabled = df.get_boolean(r'WARNINGS', fallback=True)
@@ -211,39 +224,61 @@ def _preprocess_doxyfile(context):
if context.warnings.treat_as_errors is None:
context.warnings.treat_as_errors = df.get_boolean(r'WARN_AS_ERROR', fallback=False)
context.verbose_value(r'Context.warnings.treat_as_errors', context.warnings.treat_as_errors)
- df.set_value(r'WARN_AS_ERROR', context.warnings.treat_as_errors)
+ df.set_value(r'WARN_AS_ERROR', False) # we do this ourselves
if context.warnings.undocumented is None:
context.warnings.undocumented = df.get_boolean(r'WARN_IF_UNDOCUMENTED', fallback=True)
context.verbose_value(r'Context.warnings.undocumented', context.warnings.undocumented)
df.set_value(r'WARN_IF_UNDOCUMENTED', context.warnings.undocumented)
- global _doxygen_overrides
df.append()
- for k, v in _doxygen_overrides:
- df.set_value(k, v)
- df.set_value(r'NUM_PROC_THREADS', context.threads)
- df.add_value(r'CLANG_OPTIONS', rf'-std=c++{context.cpp%100}')
- df.add_value(r'CLANG_OPTIONS', r'-Wno-everything')
- df.add_value(r'STRIP_FROM_PATH', context.strip_paths)
+ df.append(r'# context.sources', end='\n\n') # ----------------------------------------------------
+
+ df.add_value(r'INPUT', context.sources.paths)
+ df.set_value(r'FILE_PATTERNS', context.sources.patterns)
+ df.add_value(r'EXCLUDE', { context.html_dir, context.xml_dir })
+ df.add_value(r'STRIP_FROM_PATH', context.sources.strip_paths)
- if context.tagfiles:
+ if context.sources.extract_all is None:
+ context.sources.extract_all = df.get_boolean(r'EXTRACT_ALL', fallback=False)
+ context.verbose_value(r'Context.sources.extract_all', context.sources.extract_all)
+ df.set_value(r'EXTRACT_ALL', context.sources.extract_all)
+
+ df.append()
+ df.append(r'# context.examples', end='\n\n') # ----------------------------------------------------
+
+ df.add_value(r'EXAMPLE_PATH', context.examples.paths)
+ df.set_value(r'EXAMPLE_PATTERNS', context.examples.patterns)
+
+ if context.images.paths: # ----------------------------------------------------
+ df.append()
+ df.append(r'# context.images', end='\n\n')
+ df.add_value(r'IMAGE_PATH', context.images.paths)
+
+ if context.tagfiles: # ----------------------------------------------------
df.append()
- df.add_value(r'TAGFILES', [rf'{k}={v}' for k,v in context.tagfiles.items()])
+ df.append(r'# context.tagfiles', end='\n\n')
+ df.add_value(r'TAGFILES', [rf'{k if isinstance(v, str) else v[0]}={v if isinstance(v, str) else v[1]}' for k,v in context.tagfiles.items()])
- if context.aliases:
+ if context.aliases: # ----------------------------------------------------
df.append()
+ df.append(r'# context.aliases', end='\n\n')
df.add_value(r'ALIASES', [rf'{k}={v}' for k,v in context.aliases.items()])
- if context.defines:
+ if context.defines: # ----------------------------------------------------
df.append()
+ df.append(r'# context.defines', end='\n\n')
df.add_value(r'PREDEFINED', [rf'{k}={v}' for k,v in context.defines.items()])
# apply m.css stuff
if 1:
df.append()
+ df.append(r'# m.css', end='\n\n')
+
+ df.append(r'##!')
+ df.append(rf'##! M_SHOW_UNDOCUMENTED = {"YES" if context.sources.extract_all else "NO"}')
df.append(r'##!')
- df.append(rf'##! M_SHOW_UNDOCUMENTED = {"YES" if context.extract_all else "NO"}')
+ df.append(rf'##! M_FAVICON = "{context.favicon if context.favicon is not None else ""}"')
df.append(r'##!')
if not df.contains(r'M_CLASS_TREE_EXPAND_LEVELS'):
df.append(r'##! M_CLASS_TREE_EXPAND_LEVELS = 3')
@@ -260,9 +295,6 @@ def _preprocess_doxyfile(context):
if not df.contains(r'M_SEARCH_DISABLED'):
df.append(r'##! M_SEARCH_DISABLED = NO')
df.append(r'##!')
- if not df.contains(r'M_FAVICON'):
- df.append(rf'##! M_FAVICON = "{context.favicon if context.favicon is not None else ""}"')
- df.append(r'##!')
if not df.contains(r'M_LINKS_NAVBAR1') and not df.contains(r'M_LINKS_NAVBAR2'):
if context.navbar:
bar = [v for v in context.navbar]
@@ -297,6 +329,8 @@ def _preprocess_doxyfile(context):
if context.github:
top_row.append(rf'Github')
top_row.append(rf'Report an issue')
+ if context.license and context.license[r'uri']:
+ top_row.append(rf'License')
if context.generate_tagfile:
top_row.append(rf'Doxygen tagfile')
if top_row:
@@ -310,15 +344,13 @@ def _preprocess_doxyfile(context):
df.append(r'##!')
# move to a temp file path
- if context.temp_file_name:
- df.path = coerce_path(context.output_dir, context.temp_file_name)
- else:
- df.path = coerce_path(context.output_dir, df.path.name + rf'.{df.hash()}.temp')
- context.doxyfile_path = df.path
- context.verbose_value(r'Context.doxyfile_path', context.doxyfile_path)
+ df.path = coerce_path(tempfile.gettempdir(), rf'poxy.{df.hash()}.Doxyfile')
+ context.temp_doxyfile_path = df.path
+ context.verbose_value(r'Context.temp_doxyfile_path', context.temp_doxyfile_path)
# debug dump final doxyfile
- df.cleanup()
+ if not context.is_verbose() or 1:
+ df.cleanup()
if context.dry_run:
context.info(r'#====================================================================================')
context.info(r'# poxy-generated Doxyfile')
@@ -405,7 +437,7 @@ def _postprocess_xml(context):
deleted = True
extracted_implementation = False
- tentative_macros = regex_or(context.highlighting.macros)
+ tentative_macros = regex_or(context.code_blocks.macros)
macros = set()
cpp_tree = CppTree()
xml_files = get_all_files(context.xml_dir, any=(r'*.xml'))
@@ -607,11 +639,11 @@ def _postprocess_xml(context):
write_xml_to_file(xml, xml_file)
# add to syntax highlighter
- context.highlighting.namespaces.add(cpp_tree.matcher(CppTree.NAMESPACES))
- context.highlighting.types.add(cpp_tree.matcher(CppTree.TYPES))
- context.highlighting.enums.add(cpp_tree.matcher(CppTree.ENUM_VALUES))
+ context.code_blocks.namespaces.add(cpp_tree.matcher(CppTree.NAMESPACES))
+ context.code_blocks.types.add(cpp_tree.matcher(CppTree.TYPES))
+ context.code_blocks.enums.add(cpp_tree.matcher(CppTree.ENUM_VALUES))
for macro in macros:
- context.highlighting.macros.add(macro)
+ context.code_blocks.macros.add(macro)
# merge extracted implementations
if extracted_implementation:
@@ -767,7 +799,96 @@ def _postprocess_html(context):
# RUN
#=======================================================================================================================
-def run(config_path='.', output_dir='.', threads=-1, cleanup=True, verbose=False, mcss_dir=None, temp_file_name=None, logger=None, dry_run=False):
+def _read_output_streams(stdout, stderr):
+ stdout.seek(0)
+ stderr.seek(0)
+ return {
+ r'stdout' : stdout.read().strip(),
+ r'stderr' : stderr.read().strip()
+ }
+
+
+
+def _dump_output_streams(context, outputs, source=''):
+ if source:
+ source = rf'{source} '
+ if outputs[r'stdout']:
+ context.info(rf'{source}stdout:')
+ context.info(outputs[r'stdout'], indent=r' ')
+ if outputs[r'stderr']:
+ context.info(rf'{source}stderr:')
+ context.info(outputs[r'stderr'], indent=r' ')
+
+
_warnings_regexes = (
	# doxygen
	re.compile(r'^(?P<file>.+?):(?P<line>[0-9]+): warning:\s*(?P<text>.+?)\s*$', re.I),
	# m.css
	re.compile(r'^WARNING:root:(?P<file>.+[.]xml):\s*(?P<text>.+?)\s*$', re.I),
	re.compile(r'^WARNING:root:\s*(?P<text>.+?)\s*$', re.I),
	# catch-all
	re.compile(r'^(?:Warning|Error):\s*(?P<text>.+?)\s*$', re.I)
)
+_warnings_trim_suffixes = (
+ r'Skipping it...',
+)
+_warnings_substitutions = (
+ (r'does not exist or is not a file', r'did not exist or was not a file'),
+)
+_warnings_ignored = (
+ r'inline code has multiple lines, fallback to a code block',
+ r'libgs not found'
+)
+def _extract_warnings(outputs):
+ if not outputs:
+ return []
+
+ global _warnings_regexes
+ global _warnings_ignored
+ global _warnings_trim_suffixes
+ global _warnings_substitutions
+
+ warnings = []
+ for k, v in outputs.items():
+ if not v:
+ continue
+ output = v.split('\n')
+ for o in output:
+ for regex in _warnings_regexes:
+ m = regex.fullmatch(o)
+ if m:
+ text = m[r'text'].strip()
+ for suffix in _warnings_trim_suffixes:
+ if text.endswith(suffix):
+ text = text[:-len(suffix)].strip()
+ break
+ for old, new in _warnings_substitutions:
+ text = text.replace(old, new)
+ if not text or text in _warnings_ignored:
+ break
+ groups = m.groupdict()
+ if r'file' in groups:
+ if r'line' in groups:
+ warnings.append(rf"{m[r'file']}:{m[r'line']}: {text}")
+ else:
+ warnings.append(rf"{m[r'file']}: {text}")
+ else:
+ warnings.append(text)
+ break
+ return warnings
+
+
+def run(config_path='.',
+ output_dir='.',
+ threads=-1,
+ cleanup=True,
+ verbose=False,
+ mcss_dir=None,
+ doxygen_path=None,
+ logger=None,
+ dry_run=False,
+ treat_warnings_as_errors=None
+ ):
context = project.Context(
config_path = config_path,
@@ -776,9 +897,10 @@ def run(config_path='.', output_dir='.', threads=-1, cleanup=True, verbose=False
cleanup = cleanup,
verbose = verbose,
mcss_dir = mcss_dir,
- temp_file_name = temp_file_name,
+ doxygen_path = doxygen_path,
logger = logger,
- dry_run = dry_run
+ dry_run = dry_run,
+ treat_warnings_as_errors = treat_warnings_as_errors
)
with ScopeTimer(r'All tasks', print_start=False, print_end=context.verbose if dry_run else context.info) as all_tasks_timer:
@@ -796,26 +918,53 @@ def run(config_path='.', output_dir='.', threads=-1, cleanup=True, verbose=False
delete_directory(context.xml_dir, logger=context.verbose_logger)
delete_directory(context.html_dir, logger=context.verbose_logger)
+ # resolve any uri tagfiles
+ if context.unresolved_tagfiles:
+ with ScopeTimer(r'Resolving remote tagfiles', print_start=True, print_end=context.verbose_logger) as t:
+ for source, v in context.tagfiles.items():
+ if isinstance(v, str):
+ continue
+ file = Path(v[0])
+ if file.exists():
+ continue
+ context.verbose(rf'Downloading {source} => {file}')
+ response = requests.get(
+ source,
+ allow_redirects=True,
+ stream=False,
+ timeout=30
+ )
+ with open(file, 'w', encoding='utf-8', newline='\n') as f:
+ f.write(response.text)
+
+ make_temp_file = lambda: tempfile.SpooledTemporaryFile(mode='w+', newline='\n', encoding='utf-8')
+
# run doxygen to generate the xml
if 1:
with ScopeTimer(r'Generating XML files with Doxygen', print_start=True, print_end=context.verbose_logger) as t:
- with tempfile.SpooledTemporaryFile(mode='w+', newline='\n', encoding='utf-8') as file:
+ with make_temp_file() as stdout, make_temp_file() as stderr:
try:
subprocess.run(
- ['doxygen', str(context.doxyfile_path)],
+ [str(context.doxygen_path), str(context.temp_doxyfile_path)],
check=True,
- stdout=file,
- stderr=file,
+ stdout=stdout,
+ stderr=stderr,
cwd=context.input_dir
)
except:
- context.warning(r'Doxygen failed! Output dump:')
- file.seek(0)
- context.info(file.read(), indent=r' ')
+ context.info(r'Doxygen failed!')
+ _dump_output_streams(context, _read_output_streams(stdout, stderr), source=r'Doxygen')
raise
- context.verbose(r'Doxygen output dump:')
- file.seek(0)
- context.verbose(file.read(), indent=r' ')
+ if context.is_verbose() or context.warnings.enabled:
+ outputs = _read_output_streams(stdout, stderr)
+ if context.is_verbose():
+ _dump_output_streams(context, outputs, source=r'Doxygen')
+ if context.warnings.enabled:
+ warnings = _extract_warnings(outputs)
+ for w in warnings:
+ if context.warnings.treat_as_errors:
+ raise Exception(rf'{w} (warning treated as error)')
+ context.warning(w)
# remove the local paths from the tagfile since they're meaningless (and a privacy breach)
if context.tagfile_path is not None and context.tagfile_path.exists():
@@ -828,41 +977,47 @@ def run(config_path='.', output_dir='.', threads=-1, cleanup=True, verbose=False
if 1:
_postprocess_xml(context)
- context.verbose_object(r'Context.highlighting', context.highlighting)
+ context.verbose_object(r'Context.code_blocks', context.code_blocks)
# compile regexes
# (done here because doxygen and xml preprocessing adds additional values to these lists)
- context.highlighting.namespaces = regex_or(context.highlighting.namespaces, pattern_prefix='(?:::)?', pattern_suffix='(?:::)?')
- context.highlighting.types = regex_or(context.highlighting.types, pattern_prefix='(?:::)?', pattern_suffix='(?:::)?')
- context.highlighting.enums = regex_or(context.highlighting.enums, pattern_prefix='(?:::)?')
- context.highlighting.string_literals = regex_or(context.highlighting.string_literals)
- context.highlighting.numeric_literals = regex_or(context.highlighting.numeric_literals)
- context.highlighting.macros = regex_or(context.highlighting.macros)
+ context.code_blocks.namespaces = regex_or(context.code_blocks.namespaces, pattern_prefix='(?:::)?', pattern_suffix='(?:::)?')
+ context.code_blocks.types = regex_or(context.code_blocks.types, pattern_prefix='(?:::)?', pattern_suffix='(?:::)?')
+ context.code_blocks.enums = regex_or(context.code_blocks.enums, pattern_prefix='(?:::)?')
+ context.code_blocks.string_literals = regex_or(context.code_blocks.string_literals)
+ context.code_blocks.numeric_literals = regex_or(context.code_blocks.numeric_literals)
+ context.code_blocks.macros = regex_or(context.code_blocks.macros)
context.autolinks = tuple([(re.compile('(?',
- help=r"sets the number of threads used (default: automatic)"
+ help=r"sets the number of threads to use (default: automatic)"
)
args.add_argument(
r'--m.css',
type=Path,
default=None,
metavar=r'<path>',
- help=r"overrides the version of m.css used for documentation generation",
+ help=r"specifies the version of m.css to use (default: uses the bundled one)",
dest=r'mcss'
)
+ args.add_argument(
+ r'--doxygen',
+ type=Path,
+ default=None,
+ metavar=r'<path>',
+ help=r"specifies the Doxygen executable to use (default: finds Doxygen on system path)",
+ )
+ args.add_argument(
+ r'--werror',
+ action=r'store_true',
+ help=r"always treats warnings as errors regardless of config file settings"
+ )
args.add_argument(r'--nocleanup', action=r'store_true', help=argparse.SUPPRESS)
- args.add_argument(r'--temp_file_name', type=str, default=None, metavar=r'', help=argparse.SUPPRESS)
args = args.parse_args()
verbose = args.verbose
result = run(
@@ -941,9 +1107,10 @@ def main():
cleanup = not args.nocleanup,
verbose = verbose,
mcss_dir = args.mcss,
- temp_file_name = args.temp_file_name,
+ doxygen_path = args.doxygen,
logger=True, # stderr + stdout
- dry_run=args.dry
+ dry_run=args.dry,
+ treat_warnings_as_errors=True if args.werror else None
)
if result is None or bool(result):
sys.exit(0)
diff --git a/poxy/utils.py b/poxy/utils.py
index 9dad07f..e78d702 100644
--- a/poxy/utils.py
+++ b/poxy/utils.py
@@ -74,6 +74,20 @@ def enum_subdirs(root):
+def combine_dicts(x, y):
+ z = x.copy()
+ z.update(y)
+ return z
+
+
+
+_is_uri_regex = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*://.+$')
+def is_uri(s):
+ global _is_uri_regex
+ return _is_uri_regex.fullmatch(str(s)) is not None
+
+
+
#=======================================================================================================================
# REGEX REPLACER
#=======================================================================================================================
diff --git a/setup.py b/setup.py
index 1ed415b..1aa4133 100644
--- a/setup.py
+++ b/setup.py
@@ -41,10 +41,10 @@ def enum_subdirs(root):
setup_args = dict(
name=r'poxy',
- version=r'0.2.1',
+ version=r'0.3.0',
description=r'Documentation generator for C++.',
long_description_content_type=r'text/markdown',
- long_description=f'{README}\n\n{HISTORY}'.strip(),
+ long_description=f'{README.strip()}\n\n{HISTORY.strip()}'.strip(),
license=r'MIT',
packages=find_packages(),
author=r'Mark Gillard',