diff --git a/.bandit b/.bandit
new file mode 100644
index 0000000..27970d5
--- /dev/null
+++ b/.bandit
@@ -0,0 +1,2 @@
+[bandit]
+skips = B608,B601
\ No newline at end of file
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..f5717b1
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,17 @@
+[flake8]
+# Max line length compatible with Black
+max-line-length = 100
+
+# Directories and files to exclude from linting
+exclude =
+    venv,
+    .git,
+    __pycache__,
+    build,
+    dist,
+    tests/*
+
+# Ignore rules that conflict with Black formatting
+ignore =
+    E203,
+    W503
diff --git a/.github/workflows/build_linux_win.yml b/.github/workflows/build_linux_win.yml
new file mode 100644
index 0000000..d8ff7dd
--- /dev/null
+++ b/.github/workflows/build_linux_win.yml
@@ -0,0 +1,169 @@
+name: Build and Release
+
+on:
+  push:
+    branches:
+      - main
+      - develop
+      - 'feature/**'
+      - 'bugfix/**'
+    paths-ignore:
+      - "**/README.md"
+
+permissions:
+  contents: write # Necessary for pushing tags to the repository
+
+jobs:
+  versioning:
+    runs-on: ubuntu-latest
+    outputs:
+      VERSION: ${{ steps.get_version.outputs.VERSION }}
+      BUILD_TYPE: ${{ steps.get_version.outputs.BUILD_TYPE }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Get version information
+        id: get_version
+        run: |
+          COMMIT_HASH=$(git rev-parse --short HEAD)
+          BUILD_ID=${GITHUB_RUN_NUMBER}
+          YEAR=$(date +'%y')
+          WEEK=$(date +'%U')
+          Z=0
+
+          if [[ "${GITHUB_REF_NAME}" == "main" ]]; then
+            VERSION="${YEAR}.${WEEK}.${Z}-${BUILD_ID}"
+            BUILD_TYPE="stable"
+          elif [[ "${GITHUB_REF_NAME}" == "develop" ]]; then
+            VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-rc.${BUILD_ID}"
+            BUILD_TYPE="rc"
+          elif [[ "${GITHUB_REF_NAME}" == bugfix/* ]]; then
+            Z=1 # Increment z for bugfix branches
+            VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-dev.${BUILD_ID}"
+            BUILD_TYPE="dev"
+          elif [[ "${GITHUB_REF_NAME}" == feature/* ]]; then
+            VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-dev.${BUILD_ID}"
+            BUILD_TYPE="dev"
+          else
+            echo "Unsupported branch type: ${GITHUB_REF_NAME}"
+            exit 1
+          fi
+
+          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
+          echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_OUTPUT
+
+  build-windows:
+    runs-on: windows-latest
+    needs: versioning
+    env:
+      VERSION: ${{ needs.versioning.outputs.VERSION }}
+      BUILD_TYPE: ${{ needs.versioning.outputs.BUILD_TYPE }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.12.7
+
+      - name: Install Dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install nuitka
+
+      - name: Build executable
+        run: |
+          mkdir build
+          nuitka --standalone --onefile --output-dir=build/windows --output-filename=tasa.exe src/gui.py `
+            --include-data-files=src/low.png=low.png --assume-yes-for-downloads
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: tasa-windows
+          path: build/windows/tasa.exe
+
+  build-linux:
+    runs-on: ubuntu-latest
+    needs: versioning
+    env:
+      VERSION: ${{ needs.versioning.outputs.VERSION }}
+      BUILD_TYPE: ${{ needs.versioning.outputs.BUILD_TYPE }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.12.7
+
+      - name: Install Dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install nuitka
+
+      - name: Build executable
+        run: |
+          mkdir -p build/linux
+          nuitka --standalone --onefile --output-dir=build/linux \
--output-filename=tasa src/gui.py \ + --include-data-files=src/low.png=low.png --assume-yes-for-downloads + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: tasa-linux + path: build/linux/tasa + + tag_and_release: + runs-on: ubuntu-latest + needs: + - build-windows + - build-linux + - versioning + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Git user + run: | + git config user.name "${{ github.actor }}" + git config user.email "${{ github.actor }}@users.noreply.github.com" + + - name: Create Git Tag + run: | + git tag -a "v${{ needs.versioning.outputs.VERSION }}" -m "Release v${{ needs.versioning.outputs.VERSION }}" + git push origin "v${{ needs.versioning.outputs.VERSION }}" + + - name: Download Windows artifact + uses: actions/download-artifact@v4 + with: + name: tasa-windows + path: artifacts/tasa-windows + + - name: Download Linux artifact + uses: actions/download-artifact@v4 + with: + name: tasa-linux + path: artifacts/tasa-linux + + - name: Create GitHub Release + uses: ncipollo/release-action@v1 + with: + artifacts: | + artifacts/tasa-windows/tasa.exe + artifacts/tasa-linux/tasa + token: ${{ secrets.GITHUB_TOKEN }} + tag: v${{ needs.versioning.outputs.VERSION }} + name: Release v${{ needs.versioning.outputs.VERSION }} + body: | + This release contains the following: + - Built files: tasa.exe (Windows), tasa (Linux) + - Build Type: ${{ needs.versioning.outputs.BUILD_TYPE }} + draft: true # Set to false if you want it published immediately diff --git a/.github/workflows/code-quality-and-security.yml b/.github/workflows/code-quality-and-security.yml new file mode 100644 index 0000000..1be71e9 --- /dev/null +++ b/.github/workflows/code-quality-and-security.yml @@ -0,0 +1,65 @@ +name: Code Quality and Security Checks + +on: + pull_request: + branches: + - main + - develop + +jobs: + quality_and_security_checks: + runs-on: ubuntu-latest + + steps: + # Step 1: Checkout code + - name: Checkout code + uses: actions/checkout@v3 + + # Step 2: Set up Python environment + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.12.7 + + # Step 3: Install dependencies + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint flake8 black bandit mypy pip-audit radon xenon semgrep + + # Step 4: Format code with Black (Check Only) + - name: Check code formatting with Black + run: black --check src + + # Step 5: Lint with Pylint + - name: Run Pylint + run: pylint $(find src -name "*.py" -not -path "./venv/*") + + # Step 6: Check code style with Flake8 + - name: Run Flake8 + run: flake8 src --exclude=venv + + # Step 7: Type Checking with Mypy + - name: Run Mypy + run: mypy src + + # Step 8: Static Analysis for Security Issues with Bandit + - name: Run Bandit + run: bandit -r src --exclude ./venv --ini .bandit + + # Step 9: Dependency Vulnerability Check with pip-audit + - name: Run Pip-audit + run: pip-audit + + # Step 10: Analyze Code Complexity with Radon + - name: Run Radon + run: radon cc src -s -a + + # Step 11: Monitor Code Quality Metrics with Xenon + - name: Run Xenon + run: xenon src --max-absolute B --max-modules B --max-average A + + # Step 12: Lightweight Static Analysis with Semgrep + - name: Run Semgrep + run: semgrep --config auto diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..82f9275 --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files 
+__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..4febab6 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,647 @@ +[MAIN] + +# Analyse import fallback blocks. 
This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Resolve imports to .pyi stubs if available. 
May reduce no-member messages and +# increase not-an-iterable messages. +prefer-stubs=no + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.12 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. 
If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of positional arguments for function / method. 
+; max-positional-arguments=5 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. 
For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + +# Let 'consider-using-join' be raised when the separator to join on would be +# non-empty (resulting in expected fixes of the type: ``"- " + " - +# ".join(items)``) +suggest-join-with-non-empty-separator=yes + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. 
+max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. 
+additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/README.md b/README.md index 5ae0c38..aaa6a6f 100644 --- a/README.md +++ b/README.md @@ -1 +1,9 @@ -# TASA \ No newline at end of file +``` +████████╗ █████╗ ███████╗ █████╗ +╚══██╔══╝██╔══██╗██╔════╝██╔══██╗ + ██║ ███████║███████╗███████║ + ██║ ██╔══██║╚════██║██╔══██║ + ██║ ██║ ██║███████║██║ ██║ + ╚═╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ +``` +### Teenusehaldurite Arva Sisestuse Automatiseerija diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..976ba02 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,2 @@ +[mypy] +ignore_missing_imports = True diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e14d5ca --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +certifi==2024.8.30 +charset-normalizer==3.4.0 +docutils==0.21.2 +idna==3.10 +Kivy==2.3.0 +Kivy-Garden==0.1.5 +Pygments==2.18.0 +requests==2.32.3 +types-requests==2.32.0.20241016 +urllib3==2.2.3 diff --git a/src/db_act.py b/src/db_act.py new file mode 100644 index 0000000..5ad53c9 --- /dev/null +++ b/src/db_act.py @@ -0,0 +1,334 @@ +"""TASA DB Logic""" + +import os +from typing import Callable, List, Tuple +import sqlite3 + + +def initialize_db_connection(db: str) -> Tuple[sqlite3.Connection, sqlite3.Cursor, str]: + """ + Initializes the SQLite database connection and returns the connection, + cursor, and the table name. + + Args: + db (str): The database file path. + + Returns: + Tuple[sqlite3.Connection, sqlite3.Cursor, str]: A tuple containing the database + connection, cursor, and table name. + """ + conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES) + cursor = conn.cursor() + table_name = db.replace(".db", "") + return conn, cursor, table_name + + +def db_exists(db: str, callback: Callable[[str], None] = print) -> bool: + """ + Checks whether the database file exists. + + Args: + db (str): The name of the database file (without '.db' extension). + callback (Callable[[str], None]): A callback function for logging errors. + + Returns: + bool: True if the database file exists, False otherwise. + """ + if os.path.exists(f"{db}.db"): + return True + callback("Project doesn't exist!") + return False + + +def create_db(db: str, callback: Callable[[str], None] = print) -> None: + """ + Creates the database and tables required for the application. + + Args: + db (str): The name of the database file. + callback (Callable): A function for logging messages, default is `print`. 
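+
+    Example (illustrative usage; "myproj" is a placeholder project name):
+
+        create_db("myproj.db")
+        # Produces myproj.db containing the last_run and myproj_initial
+        # tables, plus myproj_dev/_test/_prod and their related tables.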
+ """ + conn, cursor, table_name = initialize_db_connection(db) + + try: + create_base_tables(cursor) + create_initial_table(cursor, table_name) + create_env_tables(cursor, table_name) + conn.commit() + callback(f"Database and tables created successfully in: {db}") + except sqlite3.Error as error: + callback(f"Error creating database tables: {error}") + finally: + cursor.close() + conn.close() + + +def create_base_tables(cursor: sqlite3.Cursor) -> None: + """ + Creates the base tables like 'last_run'. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + """ + cursor.execute( + """ + CREATE TABLE IF NOT EXISTS last_run ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + last_sync_timestamp TEXT, + status TEXT + ) + """ + ) + + cursor.execute( + """ + INSERT INTO last_run (last_sync_timestamp, status) + VALUES (datetime('now'), 'initial') + """ + ) + + +def create_initial_table(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the initial table for the project. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The base name of the table. + """ + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name}_initial ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + locale TEXT, + title TEXT, + tags TEXT, + path TEXT, + content TEXT + ) + """ + ) + + +def create_env_tables(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the environment-specific tables and triggers. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The base name of the table. + """ + envs = ["_dev", "_test", "_prod"] + for env in envs: + env_table_name = f"{table_name}{env}" + create_main_env_table(cursor, env_table_name) + create_update_trigger(cursor, env_table_name) + create_related_tables(cursor, env_table_name) + + +def create_main_env_table(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the main environment-specific table. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. + """ + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name} ( + exp_article_id INTEGER, + article_id INTEGER PRIMARY KEY, + locale TEXT, + title TEXT, + tags TEXT, + path TEXT, + content TEXT, + status TEXT, + modified_timestamp TEXT + ) + """ + ) + + +def create_update_trigger(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates a trigger to update the modified timestamp on the environment table. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. + """ + cursor.execute( + f""" + CREATE TRIGGER IF NOT EXISTS update_modified_timestamp_{table_name} + AFTER UPDATE ON {table_name} + BEGIN + UPDATE {table_name} + SET modified_timestamp = datetime('now') + WHERE article_id = NEW.article_id; + END + """ + ) + + +def create_related_tables(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the related tables for the environment. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. 
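+
+    Note (illustrative; "myproj_dev" is a placeholder name):
+        Each related table is named {table_name}_{suffix}, so an environment
+        table "myproj_dev" gains companions such as
+        "myproj_dev_arva_institution" and "myproj_dev_arva_service".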
+ """ + related_tables = { + "arva_institution": f""" + id INTEGER, + pageId INTEGER, + name TEXT, + url TEXT, + isResponsible BOOLEAN, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_legal_act": f""" + id INTEGER, + pageId INTEGER, + title TEXT, + url TEXT, + legalActType TEXT, + globalId REAL, + groupId INTEGER, + versionStartDate TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_page_contact": f""" + id INTEGER, + contactId INTEGER, + pageId INTEGER, + role TEXT, + firstName TEXT, + lastName TEXT, + company TEXT, + email TEXT, + phone TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_related_pages": f""" + id INTEGER, + pageId INTEGER, + title TEXT, + locale TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_service": f""" + id INTEGER, + pageId INTEGER, + name TEXT, + url TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + } + + for table_suffix, schema in related_tables.items(): + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name}_{table_suffix} ( + {schema} + ) + """ + ) + + +def copy_table( + db: str, source_env: str, target_env: str, callback: Callable[[str], None] = print +) -> None: + """ + Copies data from a source table to a target table in the same SQLite database, + including associated related tables. + + Args: + db (str): The name of the database file. + source_env (str): The source environment identifier (e.g., 'dev'). + target_env (str): The target environment identifier (e.g., 'prod'). + callback (Callable): A function for error or status messages, default is `print`. + """ + conn, cursor, table_name = initialize_db_connection(db) + + try: + # Copy main table data + _copy_main_table(cursor, table_name, source_env, target_env) + + # Copy related tables + related_tables = [ + "arva_institution", + "arva_legal_act", + "arva_page_contact", + "arva_related_pages", + "arva_service", + ] + _copy_related_tables(cursor, table_name, source_env, target_env, related_tables) + + # Commit changes + conn.commit() + callback("Data copied successfully.") + except sqlite3.Error as error: + callback(f"An error occurred: {error}") + + +def _copy_main_table( + cursor: sqlite3.Cursor, table_name: str, source_env: str, target_env: str +) -> None: + """ + Copies data from the main source table to the target table. + + Args: + cursor: SQLite database cursor. + table_name (str): Base table name. + source_env (str): Source environment identifier. + target_env (str): Target environment identifier. + """ + source_table = f"{table_name}_{source_env}" + target_table = f"{table_name}_{target_env}" + + cursor.execute( + f""" + INSERT INTO {target_table} (article_id, locale, title, tags, path, content) + SELECT article_id, locale, title, tags, path, content + FROM {source_table} + """ + ) + + +def _copy_related_tables( + cursor: sqlite3.Cursor, + table_name: str, + source_env: str, + target_env: str, + related_tables: List[str], +) -> None: + """ + Copies data from related source tables to the target tables. + + Args: + cursor: SQLite database cursor. + table_name (str): Base table name. + source_env (str): Source environment identifier. + target_env (str): Target environment identifier. + related_tables (list): List of related table names to copy. 
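+
+    Example (illustrative; "myproj" is a placeholder base name):
+        Copying "arva_service" from "dev" to "prod" reads
+        "myproj_dev_arva_service", writes "myproj_prod_arva_service", and
+        forwards every column after the leading id/pageId pair.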
+ """ + for related_table in related_tables: + source_related_table = f"{table_name}_{source_env}_{related_table}" + target_related_table = f"{table_name}_{target_env}_{related_table}" + + # Dynamically fetch column names starting from the 3rd column + columns = cursor.execute( + f"PRAGMA table_info({source_related_table})" + ).fetchall()[2:] + column_names = ", ".join(column[1] for column in columns) + + cursor.execute( + f""" + INSERT INTO {target_related_table} (id, pageId, {column_names}) + SELECT id, pageId, {column_names} + FROM {source_related_table} + """ + ) diff --git a/src/gui.py b/src/gui.py new file mode 100644 index 0000000..82d1dfd --- /dev/null +++ b/src/gui.py @@ -0,0 +1,630 @@ +"""TASA GUI""" + +import os +import sys +import threading + +# pylint: disable=no-member +from typing import Any, Callable, Dict +from kivy.app import App +from kivy.uix.boxlayout import BoxLayout +from kivy.uix.label import Label +from kivy.uix.button import Button +from kivy.uix.textinput import TextInput +from kivy.uix.spinner import Spinner +from kivy.uix.popup import Popup +from kivy.uix.image import Image +from kivy.core.window import Window +from kivy.clock import Clock +import db_act +import helper +import prog + +# Constants for window dimensions +WINDOW_WIDTH = 400 +WINDOW_HEIGHT = 500 +Window.size = (WINDOW_WIDTH, WINDOW_HEIGHT) + + +def enforce_fixed_size(window: Any, width: int, height: int) -> None: + """ + Ensure the application window remains a fixed size. + + Args: + window (Any): The application window object. + width (int): Desired window width. + height (int): Desired window height. + """ + window.size = (width, height) + + +Window.bind( + on_resize=lambda instance, w, h: enforce_fixed_size( + instance, WINDOW_WIDTH, WINDOW_HEIGHT + ) +) + + +def resource_path(relative_path: str) -> str: + """ + Get the absolute path to a resource, whether running as a script or executable. + + Args: + relative_path (str): The relative path to the resource. + + Returns: + str: The absolute path to the resource. + """ + if hasattr(sys, "_MEIPASS"): # For bundled executables + base_path = sys._MEIPASS # pylint: disable=protected-access + else: # For development + base_path = os.path.dirname(os.path.abspath(__file__)) + + absolute_path = os.path.join(base_path, relative_path) + + if not os.path.exists(absolute_path): + print(f"[ERROR] Resource not found at path: {absolute_path}") + + return absolute_path + + +class LoadingPopup(Popup): + """Popup for displaying a loading indicator.""" + + def __init__(self, **kwargs: Any) -> None: + """ + Initialize the loading popup with a title and layout. + + Args: + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(**kwargs) + self.size_hint = (0.5, 0.3) + self.auto_dismiss = False + self.title = "Loading" + self.add_widget(self._create_layout()) + + @staticmethod + def _create_layout() -> BoxLayout: + """ + Create the layout for the loading popup. + + Returns: + BoxLayout: The layout containing the loading message. + """ + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget( + Label(text="Please wait...", font_size=18, size_hint=(1, 0.8)) + ) + return layout + + +class InputPopup(Popup): + """Popup for accepting user input.""" + + def __init__( + self, title: str, hint_text: str, callback: Callable[[str], None], **kwargs: Any + ) -> None: + """ + Initialize the input popup with a title, text input, and submit button. + + Args: + title (str): Title of the popup. 
+ hint_text (str): Placeholder text for the input field. + callback (Callable[[str], None]): Function to handle submitted input. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title=title, size_hint=(0.8, 0.35), **kwargs) + self.callback = callback + self.input_field = TextInput( + hint_text=hint_text, multiline=False, size_hint=(1, None), height=40 + ) + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.input_field) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of user input. + + Args: + instance (Button): The submit button instance. + """ + self.callback(self.input_field.text) + self.dismiss() + + +class CopyDataPopup(Popup): + """Popup for copying data between tables.""" + + def __init__( + self, callback: Callable[[str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the CopyDataPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str], None]): Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title="Copy Data", size_hint=(0.8, 0.55), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.source_env = Spinner( + text="Source Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.target_env = Spinner( + text="Target Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.source_env) + layout.add_widget(self.target_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, self.source_env.text, self.target_env.text + ) + self.dismiss() + + +class PullWorkflowPopup(Popup): + """Popup for pulling data from ARVA.""" + + def __init__( + self, callback: Callable[[str, str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the PullWorkflowPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str, str], None]): + Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. 
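+
+        Example (illustrative wiring; the handler name is a placeholder):
+
+            def on_pull(project: str, env: str, token: str, ids: str) -> None:
+                print(project, env, token, ids)
+
+            PullWorkflowPopup(callback=on_pull).open()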
+ """ + super().__init__(title="Pull Data from ARVA", size_hint=(0.8, 0.65), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.source_env = Spinner( + text="Source Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.token_input = TextInput( + hint_text="ARVA Token", multiline=False, size_hint=(1, None), height=40 + ) + self.article_id_input = TextInput( + hint_text="Article IDs (comma-separated)", + multiline=False, + size_hint=(1, None), + height=40, + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.token_input) + layout.add_widget(self.article_id_input) + layout.add_widget(self.source_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, + self.source_env.text, + self.token_input.text, + self.article_id_input.text, + ) + self.dismiss() + + +class InsertWorkflowPopup(Popup): + """Popup for inserting data into ARVA.""" + + def __init__( + self, callback: Callable[[str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the InsertWorkflowPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str], None]): Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title="Insert Data to ARVA", size_hint=(0.8, 0.55), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.target_env = Spinner( + text="Target Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.token_input = TextInput( + hint_text="ARVA Token", multiline=False, size_hint=(1, None), height=40 + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.token_input) + layout.add_widget(self.target_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, self.target_env.text, self.token_input.text + ) + self.dismiss() + + +class MainScreen(BoxLayout): + """Main application screen.""" + + def __init__(self, **kwargs: Dict[str, Any]) -> None: + """ + Initialize the main application screen. + + Args: + **kwargs (Dict[str, Any]): Additional keyword arguments for the layout. 
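+
+        Note:
+            _create_ui wires each button from a (label, handler) list, so
+            adding a new action only requires one more entry in that list.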
+ """ + super().__init__(orientation="vertical", spacing=10, padding=20, **kwargs) + self.loading_popup = LoadingPopup() + self._create_ui() + + def _create_ui(self) -> None: + """Create the main UI components.""" + # Add logo at the top + self.add_widget( + Image( + source=resource_path("low.png"), + size_hint=(1, 0.3), + ) + ) + + # Add title + self.add_widget(Label(text="TASA", font_size=28, size_hint=(1, 0.1))) + + # Add buttons + buttons = [ + ("Create a DB File", self.create_db), + ("Copy Data Between Tables", self.copy_data), + ("Pull Data from ARVA", self.pull_data), + ("Insert Data to ARVA", self.insert_data), + ] + for label, action in buttons: + self.add_widget(self._create_button(label, action)) + + # Add log area at the bottom + self.log_output = self._create_log_area() + self.add_widget(self.log_output) + + @staticmethod + def _create_button(label: str, action: Callable[[Button], None]) -> Button: + """ + Create a standardized button with the given label and action. + + Args: + label (str): The text to display on the button. + action (Callable[[Button], None]): The function to call when the button is pressed. + + Returns: + Button: The created button widget. + """ + button = Button(text=label, size_hint=(1, None), height=50) + button.bind(on_press=action) + return button + + @staticmethod + def _create_log_area() -> TextInput: + """ + Create a text input widget to serve as the log area. + + Returns: + TextInput: The text input widget configured as a log area. + """ + return TextInput( + multiline=True, + readonly=True, + size_hint=(1, 0.4), + background_color=[0.9, 0.9, 0.9, 1], + foreground_color=[0, 0, 0, 1], + ) + + def log_message(self, message: str) -> None: + """ + Log a message to the log area asynchronously. + + Args: + message (str): The message to log. + """ + Clock.schedule_once(lambda dt: self._append_message(message)) + + def _append_message(self, message: str) -> None: + """ + Append a message to the log output. + + Args: + message (str): The message to append. + """ + self.log_output.text += f"{message}\n" + + def show_loading(self) -> None: + """Show the loading popup.""" + self.loading_popup.open() + + def hide_loading(self) -> None: + """Hide the loading popup.""" + self.loading_popup.dismiss() + + def create_db(self, _instance: Button) -> None: + """Open popup to create a database.""" + popup = InputPopup( + title="Enter New Project Name", + hint_text="Project Name", + callback=self._handle_create_db, + ) + popup.open() + + def _handle_create_db(self, project_name: str) -> None: + """ + Handle the creation of a new database. + + Args: + project_name (str): The name of the new project/database. + """ + if helper.valid_project_name(project_name, callback=self.log_message): + db_path = f"{project_name}.db" + if not db_act.db_exists(project_name, callback=self.log_message): + self.show_loading() + threading.Thread( + target=self._perform_create_db, args=(db_path,) + ).start() + else: + self.log_message(f"Database '{db_path}' already exists!") + + def _perform_create_db(self, db_path: str) -> None: + """ + Perform the database creation in a separate thread. + + Args: + db_path (str): The file path for the new database. 
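+
+        Note:
+            Runs on a worker thread; all UI feedback goes through
+            log_message, which hands the update to the Kivy main loop via
+            Clock.schedule_once instead of touching widgets directly.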
+ """ + try: + db_act.create_db(db_path, callback=self.log_message) + self.log_message(f"Database '{db_path}' created successfully!") + except Exception as e: # pylint: disable=broad-except + self.log_message(f"Error: {e}") + finally: + self.hide_loading() + + def copy_data(self, _instance: Button) -> None: + """Copy data between tables.""" + popup = CopyDataPopup(callback=self._handle_copy_data) + popup.open() + + def _handle_copy_data( + self, project_name: str, source_env: str, target_env: str + ) -> None: + """ + Handle the copy data action from the user input. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment to copy data from. + target_env (str): The target environment to copy data to. + """ + if ( + helper.valid_project_name(project_name, callback=self.log_message) + and db_act.db_exists(project_name, callback=self.log_message) + and helper.check_target_env(source_env, callback=self.log_message) + and helper.check_target_env(target_env, callback=self.log_message) + ): + self.show_loading() + threading.Thread( + target=self._perform_copy_data, + args=(project_name, source_env, target_env), + ).start() + + def _perform_copy_data( + self, project_name: str, source_env: str, target_env: str + ) -> None: + """ + Perform the data copy operation in a separate thread. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment. + target_env (str): The target environment. + """ + try: + db_act.copy_table( + f"{project_name}.db", source_env, target_env, callback=self.log_message + ) + self.log_message( + f"Data copied from {source_env} to {target_env} successfully!" + ) + except Exception as e: # pylint: disable=broad-except + self.log_message(f"Error: {e}") + finally: + self.hide_loading() + + def pull_data(self, _instance: Button) -> None: + """Pull data from ARVA.""" + popup = PullWorkflowPopup(callback=self._handle_pull_data) + popup.open() + + def _handle_pull_data( + self, project_name: str, source_env: str, token: str, article_ids: str + ) -> None: + """ + Handle the pull data action from the user input. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment to pull data from. + token (str): The ARVA authentication token. + article_ids (str): Comma-separated list of article IDs. + """ + if ( + helper.valid_project_name(project_name, callback=self.log_message) + and db_act.db_exists(project_name, callback=self.log_message) + and helper.check_target_env(source_env, callback=self.log_message) + ): + self.show_loading() + threading.Thread( + target=self._perform_pull_data, + args=(project_name, source_env, token, article_ids), + ).start() + + def _perform_pull_data( + self, project_name: str, source_env: str, token: str, article_ids: str + ) -> None: + """ + Perform the data pull operation in a separate thread. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment. + token (str): The ARVA authentication token. + article_ids (str): Comma-separated list of article IDs. 
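+
+        Example of the config handed to prog.get_arva_records (illustrative
+        values only):
+
+            {
+                "db": "myproj.db",
+                "env": "test",
+                "bearer_token": "<token>",
+                "graphql_url": helper.get_env_url("test"),
+            }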
+        """
+        try:
+            config = {
+                "db": f"{project_name}.db",
+                "env": source_env,
+                "bearer_token": token,
+                "graphql_url": helper.get_env_url(source_env),
+            }
+            prog.get_arva_records(config, article_ids, callback=self.log_message)
+        except Exception as e:  # pylint: disable=broad-except
+            self.log_message(f"Error: {e}")
+        finally:
+            self.hide_loading()
+
+    def insert_data(self, _instance: Button) -> None:
+        """Insert data into ARVA."""
+        popup = InsertWorkflowPopup(callback=self._handle_insert_data)
+        popup.open()
+
+    def _handle_insert_data(
+        self, project_name: str, target_env: str, token: str
+    ) -> None:
+        """
+        Handle the insert data action from the user input.
+
+        Args:
+            project_name (str): The name of the project/database.
+            target_env (str): The target environment to insert data into.
+            token (str): The ARVA authentication token.
+        """
+        if (
+            helper.valid_project_name(project_name, callback=self.log_message)
+            and db_act.db_exists(project_name, callback=self.log_message)
+            and helper.check_target_env(target_env, callback=self.log_message)
+        ):
+            self.show_loading()
+            threading.Thread(
+                target=self._perform_insert_data, args=(project_name, target_env, token)
+            ).start()
+
+    def _perform_insert_data(
+        self, project_name: str, target_env: str, token: str
+    ) -> None:
+        """
+        Perform the data insertion operation in a separate thread.
+
+        Args:
+            project_name (str): The name of the project/database.
+            target_env (str): The target environment.
+            token (str): The ARVA authentication token.
+        """
+        graphql_url = helper.get_env_url(target_env)
+        if not graphql_url:
+            self.log_message(
+                f"Error: GraphQL URL for environment '{target_env}' not found."
+            )
+            self.hide_loading()
+            return
+
+        try:
+            prog.process_records(
+                f"{project_name}.db",
+                target_env,
+                token,
+                graphql_url,  # Now guaranteed to be a valid string
+                callback=self.log_message,
+            )
+            self.log_message("Data inserted into ARVA successfully!")
+        except Exception as e:  # pylint: disable=broad-except
+            self.log_message(f"Error: {e}")
+        finally:
+            self.hide_loading()
+
+
+class TASAApp(App):
+    """Main application class."""
+
+    def build(self) -> MainScreen:
+        """Build and return the main screen of the application."""
+        return MainScreen()
+
+
+if __name__ == "__main__":
+    TASAApp().run()
diff --git a/src/helper.py b/src/helper.py
new file mode 100644
index 0000000..670918e
--- /dev/null
+++ b/src/helper.py
@@ -0,0 +1,97 @@
+"""TASA helpers"""
+
+import os
+import re
+from typing import Callable, Optional
+
+
+def valid_project_name(name: str, callback: Callable[[str], None] = print) -> bool:
+    """
+    Validates the given project name based on specified rules.
+
+    Args:
+        name (str): The project name to validate.
+        callback (Callable): Function to handle error messages, default is `print`.
+
+    Returns:
+        bool: True if the project name is valid, False otherwise.
+    """
+    if not name:
+        callback("Name can't be empty!")
+        return False
+
+    if not re.match(r"^[a-zA-Z0-9_]+$", name):
+        callback("Invalid project name (only alphanumeric characters and underscores)!")
+        return False
+
+    if name[0].isdigit():
+        callback("Project name cannot start with a number!")
+        return False
+
+    if name[0] == "_":
+        callback("Project name cannot start with an underscore!")
+        return False
+
+    if name[-1] == "_":
+        callback("Project name cannot end with an underscore!")
+        return False
+
+    return True
+
+
+def get_env_url(env: str) -> Optional[str]:
+    """
+    Retrieves the GraphQL URL for the given environment.
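+
+    For example (values mirror the mapping below; unknown keys return None)::
+
+        >>> get_env_url("dev")
+        'https://arva-main.dev.riaint.ee/graphql'
+        >>> get_env_url("staging") is None
+        True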
+
+    Args:
+        env (str): The environment identifier (e.g., 'dev', 'test', 'prod').
+
+    Returns:
+        Optional[str]: The corresponding GraphQL URL or None if the environment is invalid.
+    """
+    envs = {
+        "dev": "https://arva-main.dev.riaint.ee/graphql",
+        "test": "https://arva-main.test.riaint.ee/graphql",
+        "prod": "",
+    }
+    return envs.get(env)
+
+
+def check_target_env(target_env: str, callback: Callable[[str], None] = print) -> bool:
+    """
+    Validates the selected target environment.
+
+    Args:
+        target_env (str): The environment identifier to validate.
+        callback (Callable[[str], None]): A callback function for logging errors.
+
+    Returns:
+        bool: True if the target environment is valid, False otherwise.
+    """
+    if target_env in {"dev", "test", "prod"}:
+        return True
+    callback("Invalid input. Please choose from 'dev', 'test', or 'prod'.")
+    return False
+
+
+def get_arva_token(target_env: str) -> str:
+    """
+    Retrieves the ARVA token for the given target environment.
+
+    Args:
+        target_env (str): The target environment identifier.
+
+    Returns:
+        str: The ARVA token for the target environment.
+    """
+    target_env_upper = target_env.upper()
+    token = os.getenv(f"ARVA_TOKEN_{target_env_upper}")
+
+    if token:
+        print(f"ARVA_TOKEN_{target_env_upper} found in environment.")
+        return token
+
+    token = input("Enter ARVA token: ").strip()
+    os.environ[f"ARVA_TOKEN_{target_env_upper}"] = token
+    print(f"ARVA_TOKEN_{target_env_upper} saved to the environment for this session.")
+    return token
diff --git a/src/low.png b/src/low.png
new file mode 100644
index 0000000..2deae8b
Binary files /dev/null and b/src/low.png differ
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000..f511426
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,169 @@
+"""TASA Main"""
+
+import os
+import helper
+import prog
+import db_act
+
+
+def display_actions() -> None:
+    """
+    Displays the main menu options for the TASA application.
+    """
+    ascii_art = (
+        "████████╗ █████╗ ███████╗ █████╗ \n"
+        "╚══██╔══╝██╔══██╗██╔════╝██╔══██╗\n"
+        "   ██║   ███████║███████╗███████║\n"
+        "   ██║   ██╔══██║╚════██║██╔══██║\n"
+        "   ██║   ██║  ██║███████║██║  ██║\n"
+        "   ╚═╝   ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝\n"
+    )
+    print(ascii_art)
+    print("Welcome to TASA, please make your selection.")
+    print("Enter 'Q' to quit.\n")
+    menu_options = [
+        "0. Display available selections.",
+        "1. Create a DB file for a project.",
+        "2. Copy data from initial table to another (dev, test, prod).",
+        "3. Pull data from ARVA.",
+        "4. Insert data to ARVA.",
+    ]
+    for option in menu_options:
+        print(option)
+
+
+def main() -> None:
+    """
+    Main function to handle the TASA application's user interactions.
+    """
+    display_actions()
+
+    while True:
+        user_input = input("\nMake a selection: ").strip().upper()
+
+        if user_input == "Q":
+            print("Exiting program.")
+            break
+        if user_input == "0":
+            display_actions()
+        elif user_input == "1":
+            handle_create_db()
+        elif user_input == "2":
+            handle_copy_table()
+        elif user_input == "3":
+            handle_pull_data()
+        elif user_input == "4":
+            handle_insert_data()
+        else:
+            print("Invalid selection. Please choose 0, 1, 2, 3, 4, or Q.")
+
+
+def handle_create_db() -> None:
+    """
+    Handles the creation of a new database file for a project.
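+
+    Example session (illustrative; ``demo`` is a placeholder project name)::
+
+        Enter new project name: demo
+        Database 'demo.db' created successfully.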
+    """
+    while True:
+        db_name = input("\nEnter new project name: ").strip().lower()
+        if helper.valid_project_name(db_name):
+            if not os.path.exists(f"{db_name}.db"):
+                db_act.create_db(f"{db_name}.db")
+                print(f"Database '{db_name}.db' created successfully.")
+                break
+            print("Project already exists!")
+
+
+def handle_copy_table() -> None:
+    """
+    Handles copying data from the initial table to another environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                source_env = (
+                    input("\nSelect source table (dev, test, prod): ").strip().lower()
+                )
+                target_env = (
+                    input("\nSelect target table (dev, test, prod): ").strip().lower()
+                )
+                if helper.check_target_env(source_env) and helper.check_target_env(
+                    target_env
+                ):
+                    db_act.copy_table(f"{db_name}.db", source_env, target_env)
+                    print(
+                        f"Data copied from {source_env} to {target_env} for project '{db_name}'."
+                    )
+                    return
+
+
+def handle_pull_data() -> None:
+    """
+    Handles pulling data from ARVA for a
+    specified project and environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                target_env = (
+                    input("\nSelect source environment (dev, test, prod): ")
+                    .strip()
+                    .lower()
+                )
+                if helper.check_target_env(target_env):
+                    token = helper.get_arva_token(target_env)
+                    article_ids = input(
+                        "Enter ARVA article ID(s), separated by commas: "
+                    ).strip()
+
+                    config = {
+                        "db": f"{db_name}.db",
+                        "env": target_env,
+                        "bearer_token": token,
+                        "graphql_url": helper.get_env_url(target_env),
+                    }
+
+                    prog.get_arva_records(config, article_ids)
+                    print(
+                        f"Data pulled for project '{db_name}' in environment '{target_env}'."
+                    )
+                    return
+
+
+def handle_insert_data() -> None:
+    """
+    Handles inserting data into ARVA for a specified project and environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                target_env = (
+                    input("\nSelect target environment (dev, test, prod): ")
+                    .strip()
+                    .lower()
+                )
+                if helper.check_target_env(target_env):
+                    token = helper.get_arva_token(target_env)
+                    graphql_url = helper.get_env_url(target_env)
+
+                    if not graphql_url:
+                        print(
+                            f"Error: GraphQL URL for environment '{target_env}' not found."
+                        )
+                        return
+
+                    prog.process_records(
+                        f"{db_name}.db",
+                        target_env,
+                        token,
+                        graphql_url,  # Ensure this is a valid string
+                    )
+                    print(
+                        f"Data inserted for project '{db_name}' in environment '{target_env}'."
+                    )
+                    return
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/prog.py b/src/prog.py
new file mode 100644
index 0000000..8a60413
--- /dev/null
+++ b/src/prog.py
@@ -0,0 +1,942 @@
+"""TASA Main Logic"""
+
+from datetime import datetime
+from typing import Callable, List, Dict, Tuple, Optional, Any
+import sqlite3
+import requests
+import urllib3
+import db_act
+
+
+# Disable InsecureRequestWarning
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+
+def get_last_run_info(cursor: sqlite3.Cursor) -> Optional[datetime]:
+    """
+    Retrieves the last synchronization timestamp from the last_run table.
+
+    Args:
+        cursor: SQLite database cursor.
+
+    Returns:
+        datetime or None: The last synchronization datetime,
+        or None if not found or an error occurs.
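+
+    Example (illustrative sketch; ``demo.db`` and the result are placeholders)::
+
+        >>> conn = sqlite3.connect("demo.db")  # doctest: +SKIP
+        >>> get_last_run_info(conn.cursor())  # doctest: +SKIP
+        datetime.datetime(2024, 1, 1, 12, 0)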
+    """
+    try:
+        cursor.execute(
+            "SELECT * FROM last_run ORDER BY last_sync_timestamp DESC LIMIT 1"
+        )
+        last_run_info = cursor.fetchone()
+
+        if last_run_info:
+            last_sync_timestamp = last_run_info[1]
+            if isinstance(last_sync_timestamp, str):
+                return datetime.strptime(last_sync_timestamp, "%Y-%m-%d %H:%M:%S")
+
+        print("No last run information found.")
+        return None
+
+    except (sqlite3.Error, ValueError, IndexError) as error:
+        print(f"Error retrieving last run information: {str(error)}")
+        return None
+
+
+def fetch_all_records(cursor: sqlite3.Cursor, env_table_name: str) -> List[Tuple]:
+    """
+    Fetches all records from the specified environment table that meet the
+    synchronization criteria.
+
+    Args:
+        cursor: SQLite database cursor.
+        env_table_name (str): The environment-specific table name
+            (e.g., '<project>_dev').
+
+    Returns:
+        list: A list of rows retrieved from the table.
+    """
+    try:
+        last_sync_datetime = get_last_run_info(cursor)
+
+        cursor.execute(
+            f"""
+            SELECT article_id, locale, title, tags, path, content
+            FROM {env_table_name}
+            WHERE modified_timestamp IS NULL OR modified_timestamp > ?
+            """,
+            (last_sync_datetime,),
+        )
+        return cursor.fetchall()
+
+    except sqlite3.Error as error:
+        print(f"Error fetching records: {str(error)}")
+        return []
+
+
+def insert_arva_records(
+    db: str, env: str, response_data: Dict, callback: Callable[[str], None] = print
+) -> None:
+    """
+    Inserts ARVA records into the appropriate tables in the SQLite database.
+
+    Args:
+        db (str): The database file name.
+        env (str): The environment identifier (e.g., 'dev', 'test', 'prod').
+        response_data (dict): The ARVA data to insert.
+        callback (Callable): A function for logging messages, default is `print`.
+    """
+    conn = sqlite3.connect(db)
+    cursor = conn.cursor()
+    table_name = db.replace(".db", "")
+
+    try:
+        # Insert `pages` data and get page_id
+        env_table_name = f"{table_name}_{env}"
+        page_id = _insert_page_data(cursor, env_table_name, response_data)
+
+        # Insert related ARVA data
+        _insert_arva_institution(cursor, env_table_name, page_id, response_data)
+        _insert_arva_legal_act(cursor, env_table_name, page_id, response_data)
+        _insert_arva_page_contact(cursor, env_table_name, page_id, response_data)
+        _insert_arva_related_pages(cursor, env_table_name, page_id, response_data)
+        _insert_arva_service(cursor, env_table_name, page_id, response_data)
+
+        # Commit changes and notify success
+        conn.commit()
+        callback(
+            f"Data saved successfully in the database for article ID {page_id} "
+            f"in environment: {env}"
+        )
+
+    except sqlite3.Error as error:
+        callback(f"Database error: {str(error)}")
+    finally:
+        cursor.close()
+        conn.close()
+
+
+def _insert_page_data(
+    cursor: sqlite3.Cursor, env_table_name: str, response_data: Dict
+) -> int:
+    """Inserts page data into the main table and returns the page_id."""
+    page_data = response_data["data"]["pages"]["single"]
+    tags = ";".join(
+        tag["title"] for tag in page_data["tags"] if isinstance(tag["title"], str)
+    )
+
+    cursor.execute(
+        f"""
+        INSERT OR IGNORE INTO {env_table_name} (article_id, locale, title, tags, path, content)
+        VALUES (?, ?, ?, ?, ?, ?)
+ """, + ( + page_data["id"], + page_data["locale"], + page_data["title"], + tags, + page_data["path"], + page_data["content"], + ), + ) + return page_data["id"] + + +def _insert_arva_institution( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA institution data.""" + for institution in response_data["data"]["arvaInstitution"][ + "getArvaInstitutionsForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_institution ( + id, pageId, name, url, isResponsible + ) + VALUES (?, ?, ?, ?, ?) + """, + ( + institution["id"], + page_id, + institution["name"], + institution["url"], + bool(institution["isResponsible"]), + ), + ) + + +def _insert_arva_legal_act( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA legal act data.""" + for legal_act in response_data["data"]["arvaLegalAct"]["getLegalActsForPage"]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_legal_act ( + id, pageId, title, url, legalActType, globalId, groupId, versionStartDate + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + legal_act["id"], + page_id, + legal_act["title"], + legal_act["url"], + legal_act["legalActType"], + legal_act["globalId"], + legal_act["groupId"], + legal_act["versionStartDate"], + ), + ) + + +def _insert_arva_page_contact( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA page contact data.""" + for contact in response_data["data"]["arvaPageContact"][ + "getArvaPageContactForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_page_contact ( + id, contactId, pageId, role, firstName, lastName, company, email, phone + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + contact["id"], + contact["contactId"], + page_id, + contact["role"], + contact["firstName"], + contact["lastName"], + contact["company"], + contact["email"], + contact["phone"], + ), + ) + + +def _insert_arva_related_pages( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA related pages data.""" + for related_page in response_data["data"]["arvaRelatedPages"][ + "getRelatedPagesForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_related_pages (id, pageId, title, locale) + VALUES (?, ?, ?, ?) + """, + ( + related_page["id"], + page_id, + related_page["title"], + related_page["locale"], + ), + ) + + +def _insert_arva_service( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA service data.""" + for service in response_data["data"]["arvaService"]["getArvaServicesForPage"]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_service (id, pageId, name, url) + VALUES (?, ?, ?, ?) + """, + ( + service["id"], + page_id, + service["name"], + service["url"], + ), + ) + + +def get_arva_records( + config: Dict[str, Any], article_ids: str, callback: Callable[[str], None] = print +) -> None: + """ + Fetches ARVA records for the specified article IDs from a GraphQL API + and stores them in the database. + + Args: + config (dict): Configuration containing the database, environment, + authentication, and GraphQL URL. + article_ids (str): Comma-separated string of article IDs. + callback (Callable): A function for logging messages, default is `print`. 
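+
+    Example (illustrative; the token and article IDs are placeholders)::
+
+        >>> config = {
+        ...     "db": "demo.db",
+        ...     "env": "dev",
+        ...     "bearer_token": "<token>",
+        ...     "graphql_url": "https://arva-main.dev.riaint.ee/graphql",
+        ... }
+        >>> get_arva_records(config, "101, 102")  # doctest: +SKIP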
+    """
+    graphql_url = config["graphql_url"]
+    headers = {
+        "Authorization": f"Bearer {config['bearer_token']}",
+        "Content-Type": "application/json",
+    }
+
+    arva_records_query = """
+    query($id: Int!) {
+        pages {
+            single(id: $id) {
+                id
+                title
+                tags {
+                    id
+                    title
+                }
+                path
+                content
+                locale
+                editor
+                isPublished
+                authorId
+                authorName
+                authorEmail
+                creatorId
+                creatorName
+                creatorEmail
+                createdAt
+                updatedAt
+            }
+            history(id: $id) {
+                trail {
+                    versionId
+                    versionDate
+                    authorId
+                    authorName
+                    actionType
+                    valueBefore
+                    valueAfter
+                }
+                total
+            }
+        }
+        arvaInstitution {
+            getArvaInstitutionsForPage(pageId: $id) {
+                id
+                name
+                url
+                isResponsible
+            }
+        }
+        arvaLegalAct {
+            getLegalActsForPage(pageId: $id) {
+                id
+                globalId
+                groupId
+                title
+                url
+                versionStartDate
+                createdAt
+                updatedAt
+                legalActType
+            }
+        }
+        arvaPageContact {
+            getArvaPageContactForPage(pageId: $id) {
+                id
+                role
+                firstName
+                lastName
+                contactId
+                company
+                email
+                phone
+            }
+        }
+        arvaRelatedPages {
+            getRelatedPagesForPage(pageId: $id) {
+                id
+                title
+                locale
+            }
+        }
+        arvaService {
+            getArvaServicesForPage(pageId: $id) {
+                id
+                name
+                url
+            }
+        }
+        arvaSdgMeta {
+            getArvaSdgMetaForPage(pageId: $id) {
+                id
+                isSdg
+                country
+                serviceTypeCode
+                nuts3Code
+                lauCode
+                annexiTopicsCode
+                annexiiTopicsCode
+                annexiiiServiceCode
+            }
+        }
+    }
+    """
+
+    for article_id in [int(raw_id.strip()) for raw_id in article_ids.split(",")]:
+        variables = {"id": article_id}
+        payload = {"query": arva_records_query, "variables": variables}
+
+        try:
+            response = requests.post(
+                graphql_url,
+                json=payload,
+                headers=headers,
+                verify=False,  # nosec
+                timeout=10,
+            )
+
+            # Check the status code before attempting to parse the body
+            if response.status_code != 200:
+                callback(
+                    f"Failed to fetch data for article ID {article_id}: {response.status_code}"
+                )
+                continue
+
+            response_data = response.json()
+
+            # Check for errors in the response
+            if "errors" in response_data:
+                unique_errors = set()  # Use a set to collect unique error messages
+                for error in response_data["errors"]:
+                    error_message = error.get("message", "Unknown error")
+                    unique_errors.add(error_message)  # Add error to the set
+
+                # Log unique error messages
+                for unique_error in unique_errors:
+                    callback(
+                        f"Error fetching data for article ID {article_id}: {unique_error}"
+                    )
+
+                continue  # Skip processing this article_id if errors are present
+
+            # Process valid data
+            if "data" in response_data:
+                insert_arva_records(
+                    config["db"], config["env"], response_data, callback=callback
+                )
+                callback(f"Records for article ID {article_id} have been inserted.")
+            else:
+                callback(f"No data returned for article ID {article_id}.")
+
+        except requests.RequestException as error:
+            callback(f"Error fetching data for article ID {article_id}: {str(error)}")
+
+
+def get_graphql_mutations() -> Tuple[str, str]:
+    """
+    Retrieves the GraphQL mutations for creating a page and handling follow-ups.
+
+    Returns:
+        Tuple[str, str]: The create mutation and follow-up mutation strings.
+    """
+    create_mutation = """
+    mutation (
+        $content: String!,
+        $description: String!,
+        $editor: String!,
+        $isPrivate: Boolean!,
+        $isPublished: Boolean!,
+        $locale: String!,
+        $path: String!,
+        $publishEndDate: Date,
+        $publishStartDate: Date,
+        $scriptCss: String,
+        $scriptJs: String,
+        $tags: [String]!,
+        $title: String!
+ ) { + pages { + create( + content: $content, + description: $description, + editor: $editor, + isPrivate: $isPrivate, + isPublished: $isPublished, + locale: $locale, + path: $path, + publishEndDate: $publishEndDate, + publishStartDate: $publishStartDate, + scriptCss: $scriptCss, + scriptJs: $scriptJs, + tags: $tags, + title: $title + ) { + responseResult { + succeeded + errorCode + slug + message + __typename + } + page { + id + updatedAt + __typename + } + __typename + } + __typename + } + } + """ + follow_up_mutation = """ + mutation ( + $pageId: Int! + $institutionInput: [ArvaInstitutionInput] + $legalActInput: [ArvaLegalActInput!]! + $pageContactInput: [ArvaPageContactInput!] + $relatedPagesInput: [ArvaRelatedPagesInput!] + $serviceInput: [ArvaServiceInput!] + ) { + arvaInstitution { + saveArvaInstitutionsForPage(pageId: $pageId, input: $institutionInput) { + succeeded + message + __typename + } + } + arvaLegalAct { + createArvaLegalAct(pageId: $pageId, input: $legalActInput) { + succeeded + message + __typename + } + } + arvaPageContact { + saveArvaPageContacts(pageId: $pageId, input: $pageContactInput) { + succeeded + message + __typename + } + } + arvaRelatedPages { + saveRelatedPages(pageId: $pageId, input: $relatedPagesInput) { + succeeded + message + __typename + } + } + arvaService { + saveArvaServicesForPage(pageId: $pageId, input: $serviceInput) { + succeeded + message + __typename + } + } + } + """ + return create_mutation, follow_up_mutation + + +def prepare_record_variables(row: Tuple) -> Dict: + """ + Prepares variables for the initial GraphQL mutation for a single record. + + Args: + row (Tuple): A tuple containing the article data + (article_id, locale, title, tags, path, content). + + Returns: + Dict: A dictionary containing the `article_id` and `variables` + for the GraphQL mutation. + """ + article_id, locale, title, tags, path, content = row + return { + "article_id": article_id, + "variables": { + "content": content, + "description": "", + "editor": "code", + "isPrivate": False, + "isPublished": False, + "locale": locale, + "path": path, + "tags": tags.split(";"), + "title": title, + }, + } + + +def process_record( + cursor: sqlite3.Cursor, + env_table_name: str, + api_config: Dict[str, Any], + row: Tuple[Any, ...], + callback: Callable[[str], None] = print, +) -> None: + """ + Processes a single record by executing the initial and follow-up GraphQL mutations. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + env_table_name (str): The environment-specific table name. + api_config (Dict[str, Any]): API configuration containing the GraphQL + URL, headers, and mutation strings. + row (Tuple[Any, ...]): A tuple containing the article data. + callback (Callable[[str], None]): A callback function for logging. Defaults to `print`. 
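+
+    Note:
+        ``row`` must match the column order produced by ``fetch_all_records``,
+        e.g. (illustrative values):
+        ``(101, "en", "Title", "tag1;tag2", "/path", "<content>")``.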
+
+    Returns:
+        None
+    """
+    try:
+        record_data = prepare_record_variables(row)
+        article_id = record_data["article_id"]
+        variables = record_data["variables"]
+
+        # Execute the initial GraphQL mutation
+        response_data = execute_graphql_mutation(api_config, variables, "create")
+        if not response_data or not response_data.get("data"):
+            callback(
+                f"Failed to process record for path: {variables.get('path', 'unknown')}"
+            )
+            return
+
+        create_result = response_data["data"]["pages"]["create"]
+        # Guard against a missing create result before dereferencing it
+        response_result = (create_result or {}).get("responseResult") or {}
+        if not create_result or not response_result.get("succeeded"):
+            error_message = response_result.get("message", "Unknown error")
+            callback(f"Failed to create page: {error_message}")
+            return
+
+        page_id = create_result["page"]["id"]
+
+        # Update the database with success
+        update_record_status(cursor, env_table_name, page_id, variables)
+
+        # Fetch and process related data
+        related_data = fetch_related_data(cursor, env_table_name, article_id)
+        handle_follow_up_mutation(api_config, page_id, related_data, callback)
+    except Exception as error:  # pylint: disable=broad-except
+        callback(f"Error while processing record: {error}")
+
+
+def update_record_status(
+    cursor: sqlite3.Cursor, env_table_name: str, page_id: int, variables: Dict[str, str]
+) -> None:
+    """
+    Updates the status of a record in the database after a successful operation.
+
+    Args:
+        cursor (sqlite3.Cursor): The SQLite database cursor.
+        env_table_name (str): The environment-specific table name.
+        page_id (int): The ID of the created page.
+        variables (Dict[str, str]): The variables used for the GraphQL
+            mutation, including `path` and `locale`.
+
+    Returns:
+        None
+    """
+    try:
+        query = (
+            f"UPDATE {env_table_name} "
+            "SET exp_article_id = ?, status = 'succeeded' "
+            "WHERE path = ? AND locale = ?"
+        )
+        parameters = (
+            page_id,
+            variables.get("path"),
+            variables.get("locale"),
+        )
+
+        if not parameters[1] or not parameters[2]:
+            raise ValueError("Missing required variables: 'path' or 'locale'")
+
+        cursor.execute(query, parameters)
+    except sqlite3.Error as db_error:
+        raise RuntimeError(
+            f"Database error while updating record status: {db_error}"
+        ) from db_error
+    except ValueError as value_error:
+        raise ValueError(f"Invalid input: {value_error}") from value_error
+
+
+def execute_graphql_mutation(
+    api_config: Dict[str, Any], variables: Dict[str, Any], mutation_type: str
+) -> Dict[str, Any]:
+    """
+    Executes a GraphQL mutation (create or follow-up).
+
+    Args:
+        api_config (Dict[str, Any]): API configuration containing the GraphQL
+            URL, headers, and mutation strings.
+        variables (Dict[str, Any]): Variables for the GraphQL query.
+        mutation_type (str): The type of mutation ("create" or "follow_up").
+
+    Returns:
+        Dict[str, Any]: The response data from the API call.
+
+    Raises:
+        ValueError: If the mutation type is not found in the API configuration.
+        RuntimeError: If the request fails or returns a non-200 status code.
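+
+    Example (illustrative; the token, URL, and variables are placeholders)::
+
+        >>> cfg = get_api_config("<token>", "https://example.test/graphql")  # doctest: +SKIP
+        >>> execute_graphql_mutation(cfg, {"title": "Demo"}, "create")  # doctest: +SKIP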
+    """
+    if mutation_type not in api_config:
+        raise ValueError(f"Invalid mutation type: {mutation_type}")
+
+    payload = {"query": api_config[mutation_type], "variables": variables}
+
+    try:
+        response = requests.post(
+            api_config["graphql_url"],
+            json=payload,
+            headers=api_config["headers"],
+            verify=False,  # nosec
+            timeout=10,
+        )
+        response.raise_for_status()  # Raise an exception for non-2xx responses
+        return response.json()
+    except requests.exceptions.RequestException as request_error:
+        # request_error.response is None when no response was received at all
+        # (e.g., a connection error), so fall back to the exception text.
+        details = (
+            request_error.response.text
+            if request_error.response is not None
+            else str(request_error)
+        )
+        raise RuntimeError(
+            f"GraphQL mutation failed: {request_error} - {details}"
+        ) from request_error
+
+
+def handle_follow_up_mutation(
+    api_config: Dict[str, Any],
+    page_id: int,
+    related_data: Dict[str, Any],
+    callback: Callable[[str], None],
+) -> None:
+    """
+    Handles the follow-up GraphQL mutation to save related data for a page.
+
+    Args:
+        api_config (Dict[str, Any]): API configuration containing the GraphQL
+            URL, headers, and mutation strings.
+        page_id (int): The ID of the created page.
+        related_data (Dict[str, Any]): Related data for institutions, legal acts, contacts, etc.
+        callback (Callable[[str], None]): A callback function for logging.
+
+    Returns:
+        None
+    """
+    related_data["pageId"] = page_id
+
+    try:
+        response_data = execute_graphql_mutation(api_config, related_data, "follow_up")
+        if response_data.get("data"):
+            callback(f"Successfully processed related records for pageId: {page_id}")
+        else:
+            callback(f"Failed to process related records for pageId: {page_id}")
+    except RuntimeError as error:
+        callback(f"Error during follow-up mutation for pageId {page_id}: {error}")
+
+
+def fetch_related_data(
+    cursor: sqlite3.Cursor, env_table_name: str, article_id: int
+) -> Dict[str, List[Dict[str, Any]]]:
+    """
+    Fetches and formats related data (institutions, legal acts, contacts, etc.)
+    from the database for the follow-up GraphQL mutation.
+
+    Args:
+        cursor (sqlite3.Cursor): The SQLite database cursor.
+        env_table_name (str): The environment-specific table name.
+        article_id (int): The article ID.
+
+    Returns:
+        Dict[str, List[Dict[str, Any]]]: A dictionary containing related data for
+            institutions, legal acts, contacts, related pages, and services.
+    """
+    # Fetch institution data
+    cursor.execute(
+        f"""
+        SELECT id, name, url, isResponsible
+        FROM "{env_table_name}_arva_institution"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    institutions = [
+        {
+            "id": row[0],
+            "name": row[1],
+            "url": row[2],
+            "isResponsible": bool(row[3]),  # Convert to Boolean
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch legal act data
+    cursor.execute(
+        f"""
+        SELECT title, url, legalActType, globalId, groupId, versionStartDate
+        FROM "{env_table_name}_arva_legal_act"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    legal_acts = [
+        {
+            "title": row[0],
+            "url": row[1],
+            "legalActType": row[2],
+            "globalId": row[3],
+            "groupId": row[4],
+            "versionStartDate": row[5],
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch page contact data
+    cursor.execute(
+        f"""
+        SELECT contactId, role, firstName, lastName, company, email, phone
+        FROM "{env_table_name}_arva_page_contact"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    contacts = [
+        {
+            "id": row[0],
+            "role": row[1],
+            "firstName": row[2],
+            "lastName": row[3],
+            "company": row[4],
+            "email": row[5],
+            "phone": row[6],
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch related page data
+    cursor.execute(
+        f"""
+        SELECT id, title, locale
+        FROM "{env_table_name}_arva_related_pages"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    related_pages = [
+        {"id": row[0], "title": row[1], "locale": row[2]} for row in cursor.fetchall()
+    ]
+
+    # Fetch service data
+    cursor.execute(
+        f"""
+        SELECT id, name, url
+        FROM "{env_table_name}_arva_service"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    services = [
+        {"id": row[0], "name": row[1], "url": row[2]} for row in cursor.fetchall()
+    ]
+
+    # Construct and return the related data dictionary
+    return {
+        "institutionInput": institutions,
+        "legalActInput": legal_acts,
+        "pageContactInput": contacts,
+        "relatedPagesInput": related_pages,
+        "serviceInput": services,
+    }
+
+
+def fetch_table_data(
+    cursor: sqlite3.Cursor, env_table_name: str, article_id: int, table_suffix: str
+) -> List[Dict[str, Any]]:
+    """
+    Fetches data from a specific related table.
+
+    Args:
+        cursor (sqlite3.Cursor): The SQLite database cursor.
+        env_table_name (str): The environment-specific table name.
+        article_id (int): The article ID.
+        table_suffix (str): The suffix for the related table (e.g., "arva_institution").
+
+    Returns:
+        List[Dict[str, Any]]: A list of dictionaries representing the rows fetched from the table.
+    """
+    table_name = f"{env_table_name}_{table_suffix}"
+    try:
+        # SQLite cannot bind identifiers as parameters, so the table name is
+        # interpolated (quoted) and only the pageId value is parameterized.
+        query = f'SELECT * FROM "{table_name}" WHERE pageId = ?'
+        cursor.execute(query, (article_id,))
+        columns = [col[0] for col in cursor.description]
+        return [dict(zip(columns, row)) for row in cursor.fetchall()]
+    except sqlite3.Error as db_error:
+        raise RuntimeError(
+            f"Error fetching data from table '{table_name}': {db_error}"
+        ) from db_error
+
+
+def get_api_config(bearer_token: str, graphql_url: str) -> Dict[str, Any]:
+    """
+    Creates and returns the API configuration required for making GraphQL requests.
+
+    Args:
+        bearer_token (str): The bearer token used for API authentication.
+        graphql_url (str): The URL of the GraphQL endpoint.
+
+    Returns:
+        Dict[str, Any]: A dictionary containing the API configuration, including:
+            - "graphql_url" (str): The GraphQL endpoint URL.
+            - "headers" (dict): HTTP headers for the request, including the Authorization token.
+            - "create" (str): The GraphQL mutation string for creating a page.
+            - "follow_up" (str): The GraphQL mutation string for follow-up operations.
+    """
+    if not bearer_token or not graphql_url:
+        raise ValueError("Bearer token and GraphQL URL must be provided.")
+
+    headers = {
+        "Authorization": f"Bearer {bearer_token}",
+        "Content-Type": "application/json",
+    }
+    create_mutation, follow_up_mutation = get_graphql_mutations()
+
+    return {
+        "graphql_url": graphql_url,
+        "headers": headers,
+        "create": create_mutation,
+        "follow_up": follow_up_mutation,
+    }
+
+
+def process_records(
+    db: str,
+    env: str,
+    bearer_token: str,
+    graphql_url: str,
+    callback: Callable[[str], None] = print,
+) -> None:
+    """
+    Processes all records in the database, creating pages and handling related data.
+
+    Args:
+        db (str): The database file path.
+        env (str): The environment identifier (e.g., dev, test, prod).
+        bearer_token (str): The authentication token for API requests.
+        graphql_url (str): The GraphQL API URL.
+        callback (Callable[[str], None]): A callback function for logging.
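+
+    Note:
+        Only rows whose ``modified_timestamp`` is NULL or newer than the last
+        recorded sync are selected (see ``fetch_all_records``).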
+
+    Returns:
+        None
+    """
+    conn = None
+    cursor = None
+    try:
+        conn, cursor, table_name = db_act.initialize_db_connection(db)
+        api_config = get_api_config(bearer_token, graphql_url)
+        env_table_name = f"{table_name}_{env}"
+
+        rows = fetch_all_records(cursor, env_table_name)
+        if not rows:
+            callback("No records to process.")
+            return
+
+        for row in rows:
+            try:
+                process_record(cursor, env_table_name, api_config, row, callback)
+            except Exception as record_error:  # pylint: disable=broad-except
+                callback(f"Error processing record: {record_error}")
+
+        conn.commit()
+        callback("All records processed successfully.")
+    except Exception as general_error:  # pylint: disable=broad-except
+        callback(f"Error processing records: {general_error}")
+    finally:
+        # Close handles only if the connection was successfully initialized
+        if cursor is not None:
+            cursor.close()
+        if conn is not None:
+            conn.close()
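+
+
+# Minimal end-to-end sketch (illustrative only; the database name, token, and
+# URL below are placeholders, not shipped configuration):
+#
+#     process_records(
+#         "demo.db",
+#         "dev",
+#         "<bearer-token>",
+#         "https://arva-main.dev.riaint.ee/graphql",
+#         callback=print,
+#     )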