From 92599e18182ac0e4f58e5d8148bf465809769bcb Mon Sep 17 00:00:00 2001 From: Keijo Laas Date: Mon, 2 Dec 2024 11:20:56 +0200 Subject: [PATCH] tasa initial --- .bandit | 2 + .flake8 | 17 + .github/workflows/build_linux_win.yml | 169 ++++ .../workflows/code-quality-and-security.yml | 65 ++ .gitignore | 162 +++ .pylintrc | 647 ++++++++++++ README.md | 10 +- mypy.ini | 2 + requirements.txt | 10 + src/db_act.py | 334 +++++++ src/gui.py | 630 ++++++++++++ src/helper.py | 97 ++ src/low.png | Bin 0 -> 17938 bytes src/main.py | 169 ++++ src/prog.py | 942 ++++++++++++++++++ 15 files changed, 3255 insertions(+), 1 deletion(-) create mode 100644 .bandit create mode 100644 .flake8 create mode 100644 .github/workflows/build_linux_win.yml create mode 100644 .github/workflows/code-quality-and-security.yml create mode 100644 .gitignore create mode 100644 .pylintrc create mode 100644 mypy.ini create mode 100644 requirements.txt create mode 100644 src/db_act.py create mode 100644 src/gui.py create mode 100644 src/helper.py create mode 100644 src/low.png create mode 100644 src/main.py create mode 100644 src/prog.py diff --git a/.bandit b/.bandit new file mode 100644 index 0000000..27970d5 --- /dev/null +++ b/.bandit @@ -0,0 +1,2 @@ +[bandit] +skips = B608,B601 \ No newline at end of file diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..f5717b1 --- /dev/null +++ b/.flake8 @@ -0,0 +1,17 @@ +[flake8] +# Max line length compatible with Black +max-line-length = 100 + +# Directories and files to exclude from linting +exclude = + venv, + .git, + __pycache__, + build, + dist, + tests/* + +# Ignore rules that conflict with Black formatting +ignore = + E203, + W503 diff --git a/.github/workflows/build_linux_win.yml b/.github/workflows/build_linux_win.yml new file mode 100644 index 0000000..d8ff7dd --- /dev/null +++ b/.github/workflows/build_linux_win.yml @@ -0,0 +1,169 @@ +name: Build and Release + +on: + push: + branches: + - main + - develop + - 'feature/**' + - 'bugfix/**' + paths-ignore: + - "**/README.md" + +permissions: + contents: write # Necessary for pushing tags to the repository + +jobs: + versioning: + runs-on: ubuntu-latest + outputs: + VERSION: ${{ steps.get_version.outputs.VERSION }} + BUILD_TYPE: ${{ steps.get_version.outputs.BUILD_TYPE }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Get version information + id: get_version + run: | + COMMIT_HASH=$(git rev-parse --short HEAD) + BUILD_ID=${GITHUB_RUN_NUMBER} + YEAR=$(date +'%y') + WEEK=$(date +'%U') + Z=0 + + if [[ "${GITHUB_REF_NAME}" == "main" ]]; then + VERSION="${YEAR}.${WEEK}.${Z}-${BUILD_ID}" + BUILD_TYPE="stable" + elif [[ "${GITHUB_REF_NAME}" == "develop" ]]; then + VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-rc.${BUILD_ID}" + BUILD_TYPE="rc" + elif [[ "${GITHUB_REF_NAME}" == bugfix/* ]]; then + Z=1 # Increment z for bugfix branches + VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-dev.${BUILD_ID}" + BUILD_TYPE="dev" + elif [[ "${GITHUB_REF_NAME}" == feature/* ]]; then + VERSION="${YEAR}.${WEEK}.${Z}-${COMMIT_HASH}-dev.${BUILD_ID}" + BUILD_TYPE="dev" + else + echo "Unsupported branch type: ${GITHUB_REF_NAME}" + exit 1 + fi + + echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_OUTPUT + + build-windows: + runs-on: windows-latest + needs: versioning + env: + VERSION: ${{ needs.versioning.outputs.VERSION }} + BUILD_TYPE: ${{ needs.versioning.outputs.BUILD_TYPE }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + uses: 
actions/setup-python@v4 + with: + python-version: 3.12.7 + + - name: Install Dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install nuitka + + - name: Build executable + run: | + mkdir build + nuitka --standalone --onefile --output-dir=build/windows --output-filename=tasa.exe src/gui.py ` + --include-data-files=src/low.png=low.png --assume-yes-for-downloads + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: tasa-windows + path: build/windows/tasa.exe + + build-linux: + runs-on: ubuntu-latest + needs: versioning + env: + VERSION: ${{ needs.versioning.outputs.VERSION }} + BUILD_TYPE: ${{ needs.versioning.outputs.BUILD_TYPE }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.12.7 + + - name: Install Dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install nuitka + + - name: Build executable + run: | + mkdir -p build/linux + nuitka --standalone --onefile --output-dir=build/linux --output-filename=tasa src/gui.py \ + --include-data-files=src/low.png=low.png --assume-yes-for-downloads + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: tasa-linux + path: build/linux/tasa + + tag_and_release: + runs-on: ubuntu-latest + needs: + - build-windows + - build-linux + - versioning + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Git user + run: | + git config user.name "${{ github.actor }}" + git config user.email "${{ github.actor }}@users.noreply.github.com" + + - name: Create Git Tag + run: | + git tag -a "v${{ needs.versioning.outputs.VERSION }}" -m "Release v${{ needs.versioning.outputs.VERSION }}" + git push origin "v${{ needs.versioning.outputs.VERSION }}" + + - name: Download Windows artifact + uses: actions/download-artifact@v4 + with: + name: tasa-windows + path: artifacts/tasa-windows + + - name: Download Linux artifact + uses: actions/download-artifact@v4 + with: + name: tasa-linux + path: artifacts/tasa-linux + + - name: Create GitHub Release + uses: ncipollo/release-action@v1 + with: + artifacts: | + artifacts/tasa-windows/tasa.exe + artifacts/tasa-linux/tasa + token: ${{ secrets.GITHUB_TOKEN }} + tag: v${{ needs.versioning.outputs.VERSION }} + name: Release v${{ needs.versioning.outputs.VERSION }} + body: | + This release contains the following: + - Built files: tasa.exe (Windows), tasa (Linux) + - Build Type: ${{ needs.versioning.outputs.BUILD_TYPE }} + draft: true # Set to false if you want it published immediately diff --git a/.github/workflows/code-quality-and-security.yml b/.github/workflows/code-quality-and-security.yml new file mode 100644 index 0000000..1be71e9 --- /dev/null +++ b/.github/workflows/code-quality-and-security.yml @@ -0,0 +1,65 @@ +name: Code Quality and Security Checks + +on: + pull_request: + branches: + - main + - develop + +jobs: + quality_and_security_checks: + runs-on: ubuntu-latest + + steps: + # Step 1: Checkout code + - name: Checkout code + uses: actions/checkout@v3 + + # Step 2: Set up Python environment + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.12.7 + + # Step 3: Install dependencies + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint flake8 black bandit mypy pip-audit radon xenon semgrep + + # Step 4: Format code with Black 
(Check Only) + - name: Check code formatting with Black + run: black --check src + + # Step 5: Lint with Pylint + - name: Run Pylint + run: pylint $(find src -name "*.py" -not -path "./venv/*") + + # Step 6: Check code style with Flake8 + - name: Run Flake8 + run: flake8 src --exclude=venv + + # Step 7: Type Checking with Mypy + - name: Run Mypy + run: mypy src + + # Step 8: Static Analysis for Security Issues with Bandit + - name: Run Bandit + run: bandit -r src --exclude ./venv --ini .bandit + + # Step 9: Dependency Vulnerability Check with pip-audit + - name: Run Pip-audit + run: pip-audit + + # Step 10: Analyze Code Complexity with Radon + - name: Run Radon + run: radon cc src -s -a + + # Step 11: Monitor Code Quality Metrics with Xenon + - name: Run Xenon + run: xenon src --max-absolute B --max-modules B --max-average A + + # Step 12: Lightweight Static Analysis with Semgrep + - name: Run Semgrep + run: semgrep --config auto diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..82f9275 --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..4febab6 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,647 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. 
The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Resolve imports to .pyi stubs if available. May reduce no-member messages and +# increase not-an-iterable messages. +prefer-stubs=no + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.12 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. 
If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of positional arguments for function / method. +; max-positional-arguments=5 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). 
+int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + +# Let 'consider-using-join' be raised when the separator to join on would be +# non-empty (resulting in expected fixes of the type: ``"- " + " - +# ".join(items)``) +suggest-join-with-non-empty-separator=yes + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. 
You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. 
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/README.md b/README.md index 5ae0c38..aaa6a6f 100644 --- a/README.md +++ b/README.md @@ -1 +1,9 @@ -# TASA \ No newline at end of file +``` +████████╗ █████╗ ███████╗ █████╗ +╚══██╔══╝██╔══██╗██╔════╝██╔══██╗ + ██║ ███████║███████╗███████║ + ██║ ██╔══██║╚════██║██╔══██║ + ██║ ██║ ██║███████║██║ ██║ + ╚═╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ +``` +### Teenusehaldurite Arva Sisestuse Automatiseerija diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..976ba02 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,2 @@ +[mypy] +ignore_missing_imports = True diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e14d5ca --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +certifi==2024.8.30 +charset-normalizer==3.4.0 +docutils==0.21.2 +idna==3.10 +Kivy==2.3.0 +Kivy-Garden==0.1.5 +Pygments==2.18.0 +requests==2.32.3 +types-requests==2.32.0.20241016 +urllib3==2.2.3 diff --git a/src/db_act.py b/src/db_act.py new file mode 100644 index 0000000..5ad53c9 --- /dev/null +++ b/src/db_act.py @@ -0,0 +1,334 @@ +"""TASA DB Logic""" + +import os +from typing import Callable, List, Tuple +import sqlite3 + + +def initialize_db_connection(db: str) -> Tuple[sqlite3.Connection, sqlite3.Cursor, str]: + """ + Initializes the SQLite database connection and returns the connection, + cursor, and the table name. + + Args: + db (str): The database file path. + + Returns: + Tuple[sqlite3.Connection, sqlite3.Cursor, str]: A tuple containing the database + connection, cursor, and table name. + """ + conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES) + cursor = conn.cursor() + table_name = db.replace(".db", "") + return conn, cursor, table_name + + +def db_exists(db: str, callback: Callable[[str], None] = print) -> bool: + """ + Checks whether the database file exists. + + Args: + db (str): The name of the database file (without '.db' extension). + callback (Callable[[str], None]): A callback function for logging errors. + + Returns: + bool: True if the database file exists, False otherwise. + """ + if os.path.exists(f"{db}.db"): + return True + callback("Project doesn't exist!") + return False + + +def create_db(db: str, callback: Callable[[str], None] = print) -> None: + """ + Creates the database and tables required for the application. + + Args: + db (str): The name of the database file. + callback (Callable): A function for logging messages, default is `print`. + """ + conn, cursor, table_name = initialize_db_connection(db) + + try: + create_base_tables(cursor) + create_initial_table(cursor, table_name) + create_env_tables(cursor, table_name) + conn.commit() + callback(f"Database and tables created successfully in: {db}") + except sqlite3.Error as error: + callback(f"Error creating database tables: {error}") + finally: + cursor.close() + conn.close() + + +def create_base_tables(cursor: sqlite3.Cursor) -> None: + """ + Creates the base tables like 'last_run'. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + """ + cursor.execute( + """ + CREATE TABLE IF NOT EXISTS last_run ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + last_sync_timestamp TEXT, + status TEXT + ) + """ + ) + + cursor.execute( + """ + INSERT INTO last_run (last_sync_timestamp, status) + VALUES (datetime('now'), 'initial') + """ + ) + + +def create_initial_table(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the initial table for the project. 
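+    The table is named `{table_name}_initial` and carries only the plain
+    article columns (locale, title, tags, path, content), without the ARVA
+    identifiers or timestamps used by the per-environment tables.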
+ + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The base name of the table. + """ + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name}_initial ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + locale TEXT, + title TEXT, + tags TEXT, + path TEXT, + content TEXT + ) + """ + ) + + +def create_env_tables(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the environment-specific tables and triggers. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The base name of the table. + """ + envs = ["_dev", "_test", "_prod"] + for env in envs: + env_table_name = f"{table_name}{env}" + create_main_env_table(cursor, env_table_name) + create_update_trigger(cursor, env_table_name) + create_related_tables(cursor, env_table_name) + + +def create_main_env_table(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the main environment-specific table. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. + """ + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name} ( + exp_article_id INTEGER, + article_id INTEGER PRIMARY KEY, + locale TEXT, + title TEXT, + tags TEXT, + path TEXT, + content TEXT, + status TEXT, + modified_timestamp TEXT + ) + """ + ) + + +def create_update_trigger(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates a trigger to update the modified timestamp on the environment table. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. + """ + cursor.execute( + f""" + CREATE TRIGGER IF NOT EXISTS update_modified_timestamp_{table_name} + AFTER UPDATE ON {table_name} + BEGIN + UPDATE {table_name} + SET modified_timestamp = datetime('now') + WHERE article_id = NEW.article_id; + END + """ + ) + + +def create_related_tables(cursor: sqlite3.Cursor, table_name: str) -> None: + """ + Creates the related tables for the environment. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + table_name (str): The environment-specific table name. 
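+
+    Example:
+        For a hypothetical base name 'myproj_dev', this creates
+        'myproj_dev_arva_institution', 'myproj_dev_arva_legal_act',
+        'myproj_dev_arva_page_contact', 'myproj_dev_arva_related_pages'
+        and 'myproj_dev_arva_service', each with ON DELETE CASCADE back
+        to 'myproj_dev(article_id)'.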
+ """ + related_tables = { + "arva_institution": f""" + id INTEGER, + pageId INTEGER, + name TEXT, + url TEXT, + isResponsible BOOLEAN, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_legal_act": f""" + id INTEGER, + pageId INTEGER, + title TEXT, + url TEXT, + legalActType TEXT, + globalId REAL, + groupId INTEGER, + versionStartDate TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_page_contact": f""" + id INTEGER, + contactId INTEGER, + pageId INTEGER, + role TEXT, + firstName TEXT, + lastName TEXT, + company TEXT, + email TEXT, + phone TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_related_pages": f""" + id INTEGER, + pageId INTEGER, + title TEXT, + locale TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + "arva_service": f""" + id INTEGER, + pageId INTEGER, + name TEXT, + url TEXT, + FOREIGN KEY(pageId) REFERENCES {table_name}(article_id) ON DELETE CASCADE + """, + } + + for table_suffix, schema in related_tables.items(): + cursor.execute( + f""" + CREATE TABLE IF NOT EXISTS {table_name}_{table_suffix} ( + {schema} + ) + """ + ) + + +def copy_table( + db: str, source_env: str, target_env: str, callback: Callable[[str], None] = print +) -> None: + """ + Copies data from a source table to a target table in the same SQLite database, + including associated related tables. + + Args: + db (str): The name of the database file. + source_env (str): The source environment identifier (e.g., 'dev'). + target_env (str): The target environment identifier (e.g., 'prod'). + callback (Callable): A function for error or status messages, default is `print`. + """ + conn, cursor, table_name = initialize_db_connection(db) + + try: + # Copy main table data + _copy_main_table(cursor, table_name, source_env, target_env) + + # Copy related tables + related_tables = [ + "arva_institution", + "arva_legal_act", + "arva_page_contact", + "arva_related_pages", + "arva_service", + ] + _copy_related_tables(cursor, table_name, source_env, target_env, related_tables) + + # Commit changes + conn.commit() + callback("Data copied successfully.") + except sqlite3.Error as error: + callback(f"An error occurred: {error}") + + +def _copy_main_table( + cursor: sqlite3.Cursor, table_name: str, source_env: str, target_env: str +) -> None: + """ + Copies data from the main source table to the target table. + + Args: + cursor: SQLite database cursor. + table_name (str): Base table name. + source_env (str): Source environment identifier. + target_env (str): Target environment identifier. + """ + source_table = f"{table_name}_{source_env}" + target_table = f"{table_name}_{target_env}" + + cursor.execute( + f""" + INSERT INTO {target_table} (article_id, locale, title, tags, path, content) + SELECT article_id, locale, title, tags, path, content + FROM {source_table} + """ + ) + + +def _copy_related_tables( + cursor: sqlite3.Cursor, + table_name: str, + source_env: str, + target_env: str, + related_tables: List[str], +) -> None: + """ + Copies data from related source tables to the target tables. + + Args: + cursor: SQLite database cursor. + table_name (str): Base table name. + source_env (str): Source environment identifier. + target_env (str): Target environment identifier. + related_tables (list): List of related table names to copy. 
+ """ + for related_table in related_tables: + source_related_table = f"{table_name}_{source_env}_{related_table}" + target_related_table = f"{table_name}_{target_env}_{related_table}" + + # Dynamically fetch column names starting from the 3rd column + columns = cursor.execute( + f"PRAGMA table_info({source_related_table})" + ).fetchall()[2:] + column_names = ", ".join(column[1] for column in columns) + + cursor.execute( + f""" + INSERT INTO {target_related_table} (id, pageId, {column_names}) + SELECT id, pageId, {column_names} + FROM {source_related_table} + """ + ) diff --git a/src/gui.py b/src/gui.py new file mode 100644 index 0000000..82d1dfd --- /dev/null +++ b/src/gui.py @@ -0,0 +1,630 @@ +"""TASA GUI""" + +import os +import sys +import threading + +# pylint: disable=no-member +from typing import Any, Callable, Dict +from kivy.app import App +from kivy.uix.boxlayout import BoxLayout +from kivy.uix.label import Label +from kivy.uix.button import Button +from kivy.uix.textinput import TextInput +from kivy.uix.spinner import Spinner +from kivy.uix.popup import Popup +from kivy.uix.image import Image +from kivy.core.window import Window +from kivy.clock import Clock +import db_act +import helper +import prog + +# Constants for window dimensions +WINDOW_WIDTH = 400 +WINDOW_HEIGHT = 500 +Window.size = (WINDOW_WIDTH, WINDOW_HEIGHT) + + +def enforce_fixed_size(window: Any, width: int, height: int) -> None: + """ + Ensure the application window remains a fixed size. + + Args: + window (Any): The application window object. + width (int): Desired window width. + height (int): Desired window height. + """ + window.size = (width, height) + + +Window.bind( + on_resize=lambda instance, w, h: enforce_fixed_size( + instance, WINDOW_WIDTH, WINDOW_HEIGHT + ) +) + + +def resource_path(relative_path: str) -> str: + """ + Get the absolute path to a resource, whether running as a script or executable. + + Args: + relative_path (str): The relative path to the resource. + + Returns: + str: The absolute path to the resource. + """ + if hasattr(sys, "_MEIPASS"): # For bundled executables + base_path = sys._MEIPASS # pylint: disable=protected-access + else: # For development + base_path = os.path.dirname(os.path.abspath(__file__)) + + absolute_path = os.path.join(base_path, relative_path) + + if not os.path.exists(absolute_path): + print(f"[ERROR] Resource not found at path: {absolute_path}") + + return absolute_path + + +class LoadingPopup(Popup): + """Popup for displaying a loading indicator.""" + + def __init__(self, **kwargs: Any) -> None: + """ + Initialize the loading popup with a title and layout. + + Args: + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(**kwargs) + self.size_hint = (0.5, 0.3) + self.auto_dismiss = False + self.title = "Loading" + self.add_widget(self._create_layout()) + + @staticmethod + def _create_layout() -> BoxLayout: + """ + Create the layout for the loading popup. + + Returns: + BoxLayout: The layout containing the loading message. + """ + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget( + Label(text="Please wait...", font_size=18, size_hint=(1, 0.8)) + ) + return layout + + +class InputPopup(Popup): + """Popup for accepting user input.""" + + def __init__( + self, title: str, hint_text: str, callback: Callable[[str], None], **kwargs: Any + ) -> None: + """ + Initialize the input popup with a title, text input, and submit button. + + Args: + title (str): Title of the popup. 
+ hint_text (str): Placeholder text for the input field. + callback (Callable[[str], None]): Function to handle submitted input. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title=title, size_hint=(0.8, 0.35), **kwargs) + self.callback = callback + self.input_field = TextInput( + hint_text=hint_text, multiline=False, size_hint=(1, None), height=40 + ) + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.input_field) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of user input. + + Args: + instance (Button): The submit button instance. + """ + self.callback(self.input_field.text) + self.dismiss() + + +class CopyDataPopup(Popup): + """Popup for copying data between tables.""" + + def __init__( + self, callback: Callable[[str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the CopyDataPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str], None]): Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title="Copy Data", size_hint=(0.8, 0.55), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.source_env = Spinner( + text="Source Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.target_env = Spinner( + text="Target Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.source_env) + layout.add_widget(self.target_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, self.source_env.text, self.target_env.text + ) + self.dismiss() + + +class PullWorkflowPopup(Popup): + """Popup for pulling data from ARVA.""" + + def __init__( + self, callback: Callable[[str, str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the PullWorkflowPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str, str], None]): + Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. 
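+
+        Note:
+            The popup performs no validation of its own; the raw field values
+            are handed to the callback, and MainScreen._handle_pull_data checks
+            the project name and environment before starting the worker thread.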
+ """ + super().__init__(title="Pull Data from ARVA", size_hint=(0.8, 0.65), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.source_env = Spinner( + text="Source Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.token_input = TextInput( + hint_text="ARVA Token", multiline=False, size_hint=(1, None), height=40 + ) + self.article_id_input = TextInput( + hint_text="Article IDs (comma-separated)", + multiline=False, + size_hint=(1, None), + height=40, + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.token_input) + layout.add_widget(self.article_id_input) + layout.add_widget(self.source_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, + self.source_env.text, + self.token_input.text, + self.article_id_input.text, + ) + self.dismiss() + + +class InsertWorkflowPopup(Popup): + """Popup for inserting data into ARVA.""" + + def __init__( + self, callback: Callable[[str, str, str], None], **kwargs: Any + ) -> None: + """ + Initialize the InsertWorkflowPopup with required fields and submit action. + + Args: + callback (Callable[[str, str, str], None]): Function to call when data is submitted. + **kwargs (Any): Additional keyword arguments for the popup. + """ + super().__init__(title="Insert Data to ARVA", size_hint=(0.8, 0.55), **kwargs) + self.callback = callback + self.project_name = TextInput( + hint_text="Project Name", multiline=False, size_hint=(1, None), height=40 + ) + self.target_env = Spinner( + text="Target Env", + values=["dev", "test", "prod"], + size_hint=(1, None), + height=40, + ) + self.token_input = TextInput( + hint_text="ARVA Token", multiline=False, size_hint=(1, None), height=40 + ) + + submit_button = Button( + text="Submit", size_hint=(1, None), height=40, background_color=[0, 1, 0, 1] + ) + submit_button.bind(on_press=self._submit) + + layout = BoxLayout(orientation="vertical", padding=10, spacing=10) + layout.add_widget(self.project_name) + layout.add_widget(self.token_input) + layout.add_widget(self.target_env) + layout.add_widget(submit_button) + self.add_widget(layout) + + def _submit(self, _instance: Button) -> None: + """ + Handle the submission of data from the popup. + + Args: + instance (Button): The button instance that triggered the submission. + """ + self.callback( + self.project_name.text, self.target_env.text, self.token_input.text + ) + self.dismiss() + + +class MainScreen(BoxLayout): + """Main application screen.""" + + def __init__(self, **kwargs: Dict[str, Any]) -> None: + """ + Initialize the main application screen. + + Args: + **kwargs (Dict[str, Any]): Additional keyword arguments for the layout. 
+ """ + super().__init__(orientation="vertical", spacing=10, padding=20, **kwargs) + self.loading_popup = LoadingPopup() + self._create_ui() + + def _create_ui(self) -> None: + """Create the main UI components.""" + # Add logo at the top + self.add_widget( + Image( + source=resource_path("low.png"), + size_hint=(1, 0.3), + ) + ) + + # Add title + self.add_widget(Label(text="TASA", font_size=28, size_hint=(1, 0.1))) + + # Add buttons + buttons = [ + ("Create a DB File", self.create_db), + ("Copy Data Between Tables", self.copy_data), + ("Pull Data from ARVA", self.pull_data), + ("Insert Data to ARVA", self.insert_data), + ] + for label, action in buttons: + self.add_widget(self._create_button(label, action)) + + # Add log area at the bottom + self.log_output = self._create_log_area() + self.add_widget(self.log_output) + + @staticmethod + def _create_button(label: str, action: Callable[[Button], None]) -> Button: + """ + Create a standardized button with the given label and action. + + Args: + label (str): The text to display on the button. + action (Callable[[Button], None]): The function to call when the button is pressed. + + Returns: + Button: The created button widget. + """ + button = Button(text=label, size_hint=(1, None), height=50) + button.bind(on_press=action) + return button + + @staticmethod + def _create_log_area() -> TextInput: + """ + Create a text input widget to serve as the log area. + + Returns: + TextInput: The text input widget configured as a log area. + """ + return TextInput( + multiline=True, + readonly=True, + size_hint=(1, 0.4), + background_color=[0.9, 0.9, 0.9, 1], + foreground_color=[0, 0, 0, 1], + ) + + def log_message(self, message: str) -> None: + """ + Log a message to the log area asynchronously. + + Args: + message (str): The message to log. + """ + Clock.schedule_once(lambda dt: self._append_message(message)) + + def _append_message(self, message: str) -> None: + """ + Append a message to the log output. + + Args: + message (str): The message to append. + """ + self.log_output.text += f"{message}\n" + + def show_loading(self) -> None: + """Show the loading popup.""" + self.loading_popup.open() + + def hide_loading(self) -> None: + """Hide the loading popup.""" + self.loading_popup.dismiss() + + def create_db(self, _instance: Button) -> None: + """Open popup to create a database.""" + popup = InputPopup( + title="Enter New Project Name", + hint_text="Project Name", + callback=self._handle_create_db, + ) + popup.open() + + def _handle_create_db(self, project_name: str) -> None: + """ + Handle the creation of a new database. + + Args: + project_name (str): The name of the new project/database. + """ + if helper.valid_project_name(project_name, callback=self.log_message): + db_path = f"{project_name}.db" + if not db_act.db_exists(project_name, callback=self.log_message): + self.show_loading() + threading.Thread( + target=self._perform_create_db, args=(db_path,) + ).start() + else: + self.log_message(f"Database '{db_path}' already exists!") + + def _perform_create_db(self, db_path: str) -> None: + """ + Perform the database creation in a separate thread. + + Args: + db_path (str): The file path for the new database. 
+ """ + try: + db_act.create_db(db_path, callback=self.log_message) + self.log_message(f"Database '{db_path}' created successfully!") + except Exception as e: # pylint: disable=broad-except + self.log_message(f"Error: {e}") + finally: + self.hide_loading() + + def copy_data(self, _instance: Button) -> None: + """Copy data between tables.""" + popup = CopyDataPopup(callback=self._handle_copy_data) + popup.open() + + def _handle_copy_data( + self, project_name: str, source_env: str, target_env: str + ) -> None: + """ + Handle the copy data action from the user input. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment to copy data from. + target_env (str): The target environment to copy data to. + """ + if ( + helper.valid_project_name(project_name, callback=self.log_message) + and db_act.db_exists(project_name, callback=self.log_message) + and helper.check_target_env(source_env, callback=self.log_message) + and helper.check_target_env(target_env, callback=self.log_message) + ): + self.show_loading() + threading.Thread( + target=self._perform_copy_data, + args=(project_name, source_env, target_env), + ).start() + + def _perform_copy_data( + self, project_name: str, source_env: str, target_env: str + ) -> None: + """ + Perform the data copy operation in a separate thread. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment. + target_env (str): The target environment. + """ + try: + db_act.copy_table( + f"{project_name}.db", source_env, target_env, callback=self.log_message + ) + self.log_message( + f"Data copied from {source_env} to {target_env} successfully!" + ) + except Exception as e: # pylint: disable=broad-except + self.log_message(f"Error: {e}") + finally: + self.hide_loading() + + def pull_data(self, _instance: Button) -> None: + """Pull data from ARVA.""" + popup = PullWorkflowPopup(callback=self._handle_pull_data) + popup.open() + + def _handle_pull_data( + self, project_name: str, source_env: str, token: str, article_ids: str + ) -> None: + """ + Handle the pull data action from the user input. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment to pull data from. + token (str): The ARVA authentication token. + article_ids (str): Comma-separated list of article IDs. + """ + if ( + helper.valid_project_name(project_name, callback=self.log_message) + and db_act.db_exists(project_name, callback=self.log_message) + and helper.check_target_env(source_env, callback=self.log_message) + ): + self.show_loading() + threading.Thread( + target=self._perform_pull_data, + args=(project_name, source_env, token, article_ids), + ).start() + + def _perform_pull_data( + self, project_name: str, source_env: str, token: str, article_ids: str + ) -> None: + """ + Perform the data pull operation in a separate thread. + + Args: + project_name (str): The name of the project/database. + source_env (str): The source environment. + token (str): The ARVA authentication token. + article_ids (str): Comma-separated list of article IDs. 
+        """
+        graphql_url = helper.get_env_url(source_env)
+        if not graphql_url:
+            self.log_message(
+                f"Error: GraphQL URL for environment '{source_env}' not found."
+            )
+            self.hide_loading()
+            return
+
+        try:
+            config = {
+                "db": f"{project_name}.db",
+                "env": source_env,
+                "bearer_token": token,
+                "graphql_url": graphql_url,
+            }
+            prog.get_arva_records(config, article_ids, callback=self.log_message)
+        except Exception as e:  # pylint: disable=broad-except
+            self.log_message(f"Error: {e}")
+        finally:
+            self.hide_loading()
+
+    def insert_data(self, _instance: Button) -> None:
+        """Insert data into ARVA."""
+        popup = InsertWorkflowPopup(callback=self._handle_insert_data)
+        popup.open()
+
+    def _handle_insert_data(
+        self, project_name: str, target_env: str, token: str
+    ) -> None:
+        """
+        Handle the insert data action from the user input.
+
+        Args:
+            project_name (str): The name of the project/database.
+            target_env (str): The target environment to insert data into.
+            token (str): The ARVA authentication token.
+        """
+        if (
+            helper.valid_project_name(project_name, callback=self.log_message)
+            and db_act.db_exists(project_name, callback=self.log_message)
+            and helper.check_target_env(target_env, callback=self.log_message)
+        ):
+            self.show_loading()
+            threading.Thread(
+                target=self._perform_insert_data, args=(project_name, target_env, token)
+            ).start()
+
+    def _perform_insert_data(
+        self, project_name: str, target_env: str, token: str
+    ) -> None:
+        """
+        Perform the data insertion operation in a separate thread.
+
+        Args:
+            project_name (str): The name of the project/database.
+            target_env (str): The target environment.
+            token (str): The ARVA authentication token.
+        """
+        graphql_url = helper.get_env_url(target_env)
+        if not graphql_url:
+            self.log_message(
+                f"Error: GraphQL URL for environment '{target_env}' not found."
+            )
+            self.hide_loading()
+            return
+
+        try:
+            prog.process_records(
+                f"{project_name}.db",
+                target_env,
+                token,
+                graphql_url,  # Now guaranteed to be a valid string
+                callback=self.log_message,
+            )
+            self.log_message("Data inserted into ARVA successfully!")
+        except Exception as e:  # pylint: disable=broad-except
+            self.log_message(f"Error: {e}")
+        finally:
+            self.hide_loading()
+
+
+class TASAApp(App):
+    """Main application class."""
+
+    def build(self) -> MainScreen:
+        """Build and return the main screen of the application."""
+        return MainScreen()
+
+
+if __name__ == "__main__":
+    TASAApp().run()
diff --git a/src/helper.py b/src/helper.py new file mode 100644 index 0000000..670918e --- /dev/null +++ b/src/helper.py @@ -0,0 +1,97 @@
+"""TASA helpers"""
+
+import os
+import re
+from typing import Callable, Optional
+
+
+def valid_project_name(name: str, callback: Callable[[str], None] = print) -> bool:
+    """
+    Validates the given project name: only letters, digits, and underscores
+    are allowed, and the name may not start with a digit or an underscore,
+    nor end with an underscore.
+
+    Args:
+        name (str): The project name to validate.
+        callback (Callable[[str], None]): Function to handle error messages, default is `print`.
+
+    Returns:
+        bool: True if the project name is valid, False otherwise.
+    """
+    if not name:
+        callback("Name can't be empty!")
+        return False
+
+    if not re.match(r"^[a-zA-Z0-9_]+$", name):
+        callback("Invalid project name (only alphanumeric characters and underscores)!")
+        return False
+
+    if name[0].isdigit():
+        callback("Project name cannot start with a number!")
+        return False
+
+    if name[0] == "_":
+        callback("Project name cannot start with an underscore!")
+        return False
+
+    if name[-1] == "_":
+        callback("Project name cannot end with an underscore!")
+        return False
+
+    return True
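An illustrative sketch, not from the patch itself: the callback parameter above lets a caller collect validation errors instead of printing them, which is how the Kivy GUI routes messages into its log area. The names and inputs below are hypothetical.

    errors = []
    valid_project_name("9starts_with_digit", callback=errors.append)  # False, message appended
    valid_project_name("_leading", callback=errors.append)            # False, message appended
    valid_project_name("good_name_1", callback=errors.append)         # True, errors unchanged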
+
+
+def get_env_url(env: str) -> Optional[str]:
+    """
+    Retrieves the GraphQL URL for the given environment.
+
+    Args:
+        env (str): The environment identifier (e.g., 'dev', 'test', 'prod').
+
+    Returns:
+        Optional[str]: The corresponding GraphQL URL, None if the environment
+        is unknown, or an empty string where no URL is configured ('prod').
+    """
+    envs = {
+        "dev": "https://arva-main.dev.riaint.ee/graphql",
+        "test": "https://arva-main.test.riaint.ee/graphql",
+        "prod": "",
+    }
+    return envs.get(env)
+
+
+def check_target_env(target_env: str, callback: Callable[[str], None] = print) -> bool:
+    """
+    Validates the selected target environment.
+
+    Args:
+        target_env (str): The environment identifier to validate.
+        callback (Callable[[str], None]): A callback function for logging errors.
+
+    Returns:
+        bool: True if the target environment is valid, False otherwise.
+    """
+    if target_env in {"dev", "test", "prod"}:
+        return True
+    callback("Invalid input. Please choose from 'dev', 'test', or 'prod'.")
+    return False
+
+
+def get_arva_token(target_env: str) -> str:
+    """
+    Retrieves the ARVA token for the given target environment.
+
+    Args:
+        target_env (str): The target environment identifier.
+
+    Returns:
+        str: The ARVA token for the target environment.
+    """
+    target_env_upper = target_env.upper()
+    token = os.getenv(f"ARVA_TOKEN_{target_env_upper}")
+
+    if token:
+        print(f"ARVA_TOKEN_{target_env_upper} found in environment.")
+        return token
+
+    token = input("Enter ARVA token: ").strip()
+    os.environ[f"ARVA_TOKEN_{target_env_upper}"] = token
+    print(f"ARVA_TOKEN_{target_env_upper} saved to the environment.")
+    return token
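An illustrative sketch, not from the patch itself: the three helpers above are meant to be chained. get_env_url returns an empty string for 'prod', so callers must treat any falsy return value as an unconfigured environment.

    import helper

    env = "test"
    if helper.check_target_env(env):
        url = helper.get_env_url(env)           # "" or None means not configured
        if url:
            token = helper.get_arva_token(env)  # reads ARVA_TOKEN_TEST or prompts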
diff --git a/src/low.png b/src/low.png new file mode 100644 index 0000000000000000000000000000000000000000..2deae8bea407f99745bdb47c66c0a28727a26109 GIT binary patch literal 17938
[17938 bytes of binary PNG data omitted]
diff --git a/src/main.py b/src/main.py new file mode 100644 --- /dev/null +++ b/src/main.py @@ -0,0 +1,169 @@
+"""TASA command-line entry point."""
+
+import os
+
+import db_act
+import helper
+import prog
+
+
+def display_actions() -> None:
+    """
+    Displays the main menu options for the TASA application.
+    """
+    ascii_art = (
+        "████████╗ █████╗ ███████╗ █████╗ \n"
+        "╚══██╔══╝██╔══██╗██╔════╝██╔══██╗\n"
+        "   ██║   ███████║███████╗███████║\n"
+        "   ██║   ██╔══██║╚════██║██╔══██║\n"
+        "   ██║   ██║  ██║███████║██║  ██║\n"
+        "   ╚═╝   ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝\n"
+    )
+    print(ascii_art)
+    print("Welcome to TASA, please make your selection.")
+    print("Enter 'Q' to quit.\n")
+    menu_options = [
+        "0. Display available selections.",
+        "1. Create a DB file for a project.",
+        "2. Copy data from initial table to another (dev, test, prod).",
+        "3. Pull data from ARVA.",
+        "4. Insert data to ARVA.",
+    ]
+    for option in menu_options:
+        print(option)
+
+
+def main() -> None:
+    """
+    Main function to handle the TASA application's user interactions.
+    """
+    display_actions()
+
+    while True:
+        user_input = input("\nMake a selection: ").strip().upper()
+
+        if user_input == "Q":
+            print("Exiting program.")
+            break
+        if user_input == "0":
+            display_actions()
+        elif user_input == "1":
+            handle_create_db()
+        elif user_input == "2":
+            handle_copy_table()
+        elif user_input == "3":
+            handle_pull_data()
+        elif user_input == "4":
+            handle_insert_data()
+        else:
+            print("Invalid selection. Please choose 0, 1, 2, 3, 4, or Q.")
+
+
+def handle_create_db() -> None:
+    """
+    Handles the creation of a new database file for a project.
+    """
+    while True:
+        db_name = input("\nEnter new project name: ").strip().lower()
+        if helper.valid_project_name(db_name):
+            if not os.path.exists(f"{db_name}.db"):
+                db_act.create_db(f"{db_name}.db")
+                print(f"Database '{db_name}.db' created successfully.")
+                break
+            print("Project already exists!")
+
+
+def handle_copy_table() -> None:
+    """
+    Handles copying data from the initial table to another environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                source_env = (
+                    input("\nSelect source table (dev, test, prod): ").strip().lower()
+                )
+                target_env = (
+                    input("\nSelect target table (dev, test, prod): ").strip().lower()
+                )
+                if helper.check_target_env(source_env) and helper.check_target_env(
+                    target_env
+                ):
+                    db_act.copy_table(f"{db_name}.db", source_env, target_env)
+                    print(
+                        f"Data copied from {source_env} to {target_env} for project '{db_name}'."
+                    )
+                    return
+
+
+def handle_pull_data() -> None:
+    """
+    Handles pulling data from ARVA for a specified project and environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                target_env = (
+                    input("\nSelect source environment (dev, test, prod): ")
+                    .strip()
+                    .lower()
+                )
+                if helper.check_target_env(target_env):
+                    token = helper.get_arva_token(target_env)
+                    graphql_url = helper.get_env_url(target_env)
+                    if not graphql_url:
+                        print(
+                            f"Error: GraphQL URL for environment '{target_env}' not found."
+                        )
+                        return
+
+                    article_ids = input(
+                        "Enter ARVA article ID(s), separated by commas: "
+                    ).strip()
+
+                    config = {
+                        "db": f"{db_name}.db",
+                        "env": target_env,
+                        "bearer_token": token,
+                        "graphql_url": graphql_url,
+                    }
+
+                    prog.get_arva_records(config, article_ids)
+                    print(
+                        f"Data pulled for project '{db_name}' in environment '{target_env}'."
+                    )
+                    return
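An illustrative sketch, not from the patch itself: the CLI above and the GUI both build the same configuration mapping for prog.get_arva_records; the file name and IDs below are hypothetical.

    config = {
        "db": "myproject.db",                      # SQLite file created via option 1
        "env": "dev",                              # one of dev / test / prod
        "bearer_token": "<arva-token>",            # from helper.get_arva_token
        "graphql_url": helper.get_env_url("dev"),  # must be non-empty
    }
    prog.get_arva_records(config, "101, 102", callback=print)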
+def handle_insert_data() -> None:
+    """
+    Handles inserting data into ARVA for a specified project and environment.
+    """
+    while True:
+        db_name = input("\nEnter existing project name: ").strip().lower()
+        if helper.valid_project_name(db_name) and db_act.db_exists(db_name):
+            while True:
+                target_env = (
+                    input("\nSelect target environment (dev, test, prod): ")
+                    .strip()
+                    .lower()
+                )
+                if helper.check_target_env(target_env):
+                    token = helper.get_arva_token(target_env)
+                    graphql_url = helper.get_env_url(target_env)
+
+                    if not graphql_url:
+                        print(
+                            f"Error: GraphQL URL for environment '{target_env}' not found."
+                        )
+                        return
+
+                    prog.process_records(
+                        f"{db_name}.db",
+                        target_env,
+                        token,
+                        graphql_url,  # Ensure this is a valid string
+                    )
+                    print(
+                        f"Data inserted for project '{db_name}' in environment '{target_env}'."
+                    )
+                    return
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/prog.py b/src/prog.py new file mode 100644 index 0000000..8a60413 --- /dev/null +++ b/src/prog.py @@ -0,0 +1,942 @@
+"""TASA Main Logic"""
+
+from datetime import datetime
+from typing import Callable, List, Dict, Tuple, Optional, Any
+import sqlite3
+import requests
+import urllib3
+import db_act
+
+
+# Disable InsecureRequestWarning
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+
+def get_last_run_info(cursor: sqlite3.Cursor) -> Optional[datetime]:
+    """
+    Retrieves the last synchronization timestamp from the last_run table.
+
+    Args:
+        cursor: SQLite database cursor.
+
+    Returns:
+        datetime or None: The last synchronization datetime,
+        or None if not found or an error occurs.
+    """
+    try:
+        cursor.execute(
+            "SELECT * FROM last_run ORDER BY last_sync_timestamp DESC LIMIT 1"
+        )
+        last_run_info = cursor.fetchone()
+
+        if last_run_info:
+            last_sync_timestamp = last_run_info[1]
+            if isinstance(last_sync_timestamp, str):
+                return datetime.strptime(last_sync_timestamp, "%Y-%m-%d %H:%M:%S")
+
+        print("No last run information found.")
+        return None
+
+    except (sqlite3.Error, ValueError, IndexError) as error:
+        print(f"Error retrieving last run information: {str(error)}")
+        return None
+
+
+def fetch_all_records(cursor: sqlite3.Cursor, env_table_name: str) -> List[Tuple]:
+    """
+    Fetches all records from the specified environment table that meet the synchronization criteria.
+
+    Args:
+        cursor: SQLite database cursor.
+        env_table_name (str): The environment-specific table name (e.g., "project_dev").
+
+    Returns:
+        list: A list of rows retrieved from the table.
+    """
+    try:
+        last_sync_datetime = get_last_run_info(cursor)
+
+        cursor.execute(
+            f"""
+            SELECT article_id, locale, title, tags, path, content
+            FROM {env_table_name}
+            WHERE modified_timestamp IS NULL OR modified_timestamp > ?
+            """,
+            (last_sync_datetime,),
+        )
+        return cursor.fetchall()
+
+    except sqlite3.Error as error:
+        print(f"Error fetching records: {str(error)}")
+        return []
+
+
+def insert_arva_records(
+    db: str, env: str, response_data: Dict, callback: Callable[[str], None] = print
+) -> None:
+    """
+    Inserts ARVA records into the appropriate tables in the SQLite database.
+
+    Args:
+        db (str): The database file name.
+        env (str): The environment identifier (e.g., 'dev', 'test', 'prod').
+        response_data (dict): The ARVA data to insert.
+        callback (Callable): A function for logging messages, default is `print`.
+ """ + conn = sqlite3.connect(db) + cursor = conn.cursor() + table_name = db.replace(".db", "") + + try: + # Insert `pages` data and get page_id + env_table_name = f"{table_name}_{env}" + page_id = _insert_page_data(cursor, env_table_name, response_data) + + # Insert related ARVA data + _insert_arva_institution(cursor, env_table_name, page_id, response_data) + _insert_arva_legal_act(cursor, env_table_name, page_id, response_data) + _insert_arva_page_contact(cursor, env_table_name, page_id, response_data) + _insert_arva_related_pages(cursor, env_table_name, page_id, response_data) + _insert_arva_service(cursor, env_table_name, page_id, response_data) + + # Commit changes and notify success + conn.commit() + callback( + f"Data saved successfully in the database for article ID {page_id} " + f"in environment: {env}" + ) + + except sqlite3.Error as error: + callback(f"Database error: {str(error)}") + finally: + cursor.close() + conn.close() + + +def _insert_page_data( + cursor: sqlite3.Cursor, env_table_name: str, response_data: Dict +) -> int: + """Inserts page data into the main table and returns the page_id.""" + page_data = response_data["data"]["pages"]["single"] + tags = ";".join( + tag["title"] for tag in page_data["tags"] if isinstance(tag["title"], str) + ) + + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name} (article_id, locale, title, tags, path, content) + VALUES (?, ?, ?, ?, ?, ?) + """, + ( + page_data["id"], + page_data["locale"], + page_data["title"], + tags, + page_data["path"], + page_data["content"], + ), + ) + return page_data["id"] + + +def _insert_arva_institution( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA institution data.""" + for institution in response_data["data"]["arvaInstitution"][ + "getArvaInstitutionsForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_institution ( + id, pageId, name, url, isResponsible + ) + VALUES (?, ?, ?, ?, ?) + """, + ( + institution["id"], + page_id, + institution["name"], + institution["url"], + bool(institution["isResponsible"]), + ), + ) + + +def _insert_arva_legal_act( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA legal act data.""" + for legal_act in response_data["data"]["arvaLegalAct"]["getLegalActsForPage"]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_legal_act ( + id, pageId, title, url, legalActType, globalId, groupId, versionStartDate + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + legal_act["id"], + page_id, + legal_act["title"], + legal_act["url"], + legal_act["legalActType"], + legal_act["globalId"], + legal_act["groupId"], + legal_act["versionStartDate"], + ), + ) + + +def _insert_arva_page_contact( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA page contact data.""" + for contact in response_data["data"]["arvaPageContact"][ + "getArvaPageContactForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_page_contact ( + id, contactId, pageId, role, firstName, lastName, company, email, phone + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, + ( + contact["id"], + contact["contactId"], + page_id, + contact["role"], + contact["firstName"], + contact["lastName"], + contact["company"], + contact["email"], + contact["phone"], + ), + ) + + +def _insert_arva_related_pages( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA related pages data.""" + for related_page in response_data["data"]["arvaRelatedPages"][ + "getRelatedPagesForPage" + ]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_related_pages (id, pageId, title, locale) + VALUES (?, ?, ?, ?) + """, + ( + related_page["id"], + page_id, + related_page["title"], + related_page["locale"], + ), + ) + + +def _insert_arva_service( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, response_data: Dict +) -> None: + """Inserts ARVA service data.""" + for service in response_data["data"]["arvaService"]["getArvaServicesForPage"]: + cursor.execute( + f""" + INSERT OR IGNORE INTO {env_table_name}_arva_service (id, pageId, name, url) + VALUES (?, ?, ?, ?) + """, + ( + service["id"], + page_id, + service["name"], + service["url"], + ), + ) + + +def get_arva_records( + config: Dict[str, Any], article_ids: str, callback: Callable[[str], None] = print +) -> None: + """ + Fetches ARVA records for the specified article IDs from a GraphQL API + and stores them in the database. + + Args: + config (dict): Configuration containing the database, environment, + authentication, and GraphQL URL. + article_ids (str): Comma-separated string of article IDs. + callback (Callable): A function for logging messages, default is `print`. + """ + graphql_url = config["graphql_url"] + headers = { + "Authorization": f"Bearer {config['bearer_token']}", + "Content-Type": "application/json", + } + + arva_records_query = """ + query($id: Int!) 
{
+      pages {
+        single(id: $id) {
+          id
+          title
+          tags {
+            id
+            title
+          }
+          path
+          content
+          locale
+          editor
+          isPublished
+          authorId
+          authorName
+          authorEmail
+          creatorId
+          creatorName
+          creatorEmail
+          createdAt
+          updatedAt
+        }
+        history(id: $id) {
+          trail {
+            versionId
+            versionDate
+            authorId
+            authorName
+            actionType
+            valueBefore
+            valueAfter
+          }
+          total
+        }
+      }
+      arvaInstitution {
+        getArvaInstitutionsForPage(pageId: $id) {
+          id
+          name
+          url
+          isResponsible
+        }
+      }
+      arvaLegalAct {
+        getLegalActsForPage(pageId: $id) {
+          id
+          globalId
+          groupId
+          title
+          url
+          versionStartDate
+          createdAt
+          updatedAt
+          legalActType
+        }
+      }
+      arvaPageContact {
+        getArvaPageContactForPage(pageId: $id) {
+          id
+          role
+          firstName
+          lastName
+          contactId
+          company
+          email
+          phone
+        }
+      }
+      arvaRelatedPages {
+        getRelatedPagesForPage(pageId: $id) {
+          id
+          title
+          locale
+        }
+      }
+      arvaService {
+        getArvaServicesForPage(pageId: $id) {
+          id
+          name
+          url
+        }
+      }
+      arvaSdgMeta {
+        getArvaSdgMetaForPage(pageId: $id) {
+          id
+          isSdg
+          country
+          serviceTypeCode
+          nuts3Code
+          lauCode
+          annexiTopicsCode
+          annexiiTopicsCode
+          annexiiiServiceCode
+        }
+      }
+    }
+    """
+
+    for article_id in [int(raw_id.strip()) for raw_id in article_ids.split(",")]:
+        variables = {"id": article_id}
+        payload = {"query": arva_records_query, "variables": variables}
+
+        try:
+            response = requests.post(
+                graphql_url,
+                json=payload,
+                headers=headers,
+                verify=False,  # nosec
+                timeout=10,
+            )
+            response_data = response.json()
+
+            # Check for errors in the response
+            if "errors" in response_data:
+                unique_errors = set()  # Collect unique error messages
+                for error in response_data["errors"]:
+                    error_message = error.get("message", "Unknown error")
+                    unique_errors.add(error_message)
+
+                # Log unique error messages
+                for unique_error in unique_errors:
+                    callback(
+                        f"Error fetching data for article ID {article_id}: {unique_error}"
+                    )
+
+                continue  # Skip processing this article_id if errors are present
+
+            # Process valid data
+            if response.status_code == 200 and "data" in response_data:
+                insert_arva_records(
+                    config["db"], config["env"], response_data, callback=callback
+                )
+                callback(f"Records for article ID {article_id} have been inserted.")
+            else:
+                callback(
+                    f"Failed to fetch data for article ID {article_id}: {response.status_code}"
+                )
+
+        except requests.RequestException as error:
+            callback(f"Error fetching data for article ID {article_id}: {str(error)}")
+
+
+def get_graphql_mutations() -> Tuple[str, str]:
+    """
+    Retrieves the GraphQL mutations for creating a page and handling follow-ups.
+
+    Returns:
+        Tuple[str, str]: The create mutation and follow-up mutation strings.
+    """
+    create_mutation = """
+    mutation (
+        $content: String!,
+        $description: String!,
+        $editor: String!,
+        $isPrivate: Boolean!,
+        $isPublished: Boolean!,
+        $locale: String!,
+        $path: String!,
+        $publishEndDate: Date,
+        $publishStartDate: Date,
+        $scriptCss: String,
+        $scriptJs: String,
+        $tags: [String]!,
+        $title: String!
+ ) { + pages { + create( + content: $content, + description: $description, + editor: $editor, + isPrivate: $isPrivate, + isPublished: $isPublished, + locale: $locale, + path: $path, + publishEndDate: $publishEndDate, + publishStartDate: $publishStartDate, + scriptCss: $scriptCss, + scriptJs: $scriptJs, + tags: $tags, + title: $title + ) { + responseResult { + succeeded + errorCode + slug + message + __typename + } + page { + id + updatedAt + __typename + } + __typename + } + __typename + } + } + """ + follow_up_mutation = """ + mutation ( + $pageId: Int! + $institutionInput: [ArvaInstitutionInput] + $legalActInput: [ArvaLegalActInput!]! + $pageContactInput: [ArvaPageContactInput!] + $relatedPagesInput: [ArvaRelatedPagesInput!] + $serviceInput: [ArvaServiceInput!] + ) { + arvaInstitution { + saveArvaInstitutionsForPage(pageId: $pageId, input: $institutionInput) { + succeeded + message + __typename + } + } + arvaLegalAct { + createArvaLegalAct(pageId: $pageId, input: $legalActInput) { + succeeded + message + __typename + } + } + arvaPageContact { + saveArvaPageContacts(pageId: $pageId, input: $pageContactInput) { + succeeded + message + __typename + } + } + arvaRelatedPages { + saveRelatedPages(pageId: $pageId, input: $relatedPagesInput) { + succeeded + message + __typename + } + } + arvaService { + saveArvaServicesForPage(pageId: $pageId, input: $serviceInput) { + succeeded + message + __typename + } + } + } + """ + return create_mutation, follow_up_mutation + + +def prepare_record_variables(row: Tuple) -> Dict: + """ + Prepares variables for the initial GraphQL mutation for a single record. + + Args: + row (Tuple): A tuple containing the article data + (article_id, locale, title, tags, path, content). + + Returns: + Dict: A dictionary containing the `article_id` and `variables` + for the GraphQL mutation. + """ + article_id, locale, title, tags, path, content = row + return { + "article_id": article_id, + "variables": { + "content": content, + "description": "", + "editor": "code", + "isPrivate": False, + "isPublished": False, + "locale": locale, + "path": path, + "tags": tags.split(";"), + "title": title, + }, + } + + +def process_record( + cursor: sqlite3.Cursor, + env_table_name: str, + api_config: Dict[str, Any], + row: Tuple[Any, ...], + callback: Callable[[str], None] = print, +) -> None: + """ + Processes a single record by executing the initial and follow-up GraphQL mutations. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + env_table_name (str): The environment-specific table name. + api_config (Dict[str, Any]): API configuration containing the GraphQL + URL, headers, and mutation strings. + row (Tuple[Any, ...]): A tuple containing the article data. + callback (Callable[[str], None]): A callback function for logging. Defaults to `print`. 
+ + Returns: + None + """ + try: + record_data = prepare_record_variables(row) + article_id = record_data["article_id"] + variables = record_data["variables"] + + # Execute the initial GraphQL mutation + response_data = execute_graphql_mutation(api_config, variables, "create") + if not response_data or not response_data.get("data"): + callback( + f"Failed to process record for path: {variables.get('path', 'unknown')}" + ) + return + + create_result = response_data["data"]["pages"]["create"] + if not create_result or not create_result["responseResult"]["succeeded"]: + error_message = create_result.get("responseResult", {}).get( + "message", "Unknown error" + ) + callback(f"Failed to create page: {error_message}") + return + + page_id = create_result["page"]["id"] + + # Update the database with success + update_record_status(cursor, env_table_name, page_id, variables) + + # Fetch and process related data + related_data = fetch_related_data(cursor, env_table_name, article_id) + handle_follow_up_mutation(api_config, page_id, related_data, callback) + except Exception as error: # pylint: disable=broad-except + callback(f"Error while processing record: {error}") + + +def update_record_status( + cursor: sqlite3.Cursor, env_table_name: str, page_id: int, variables: Dict[str, str] +) -> None: + """ + Updates the status of a record in the database after a successful operation. + + Args: + cursor (sqlite3.Cursor): The SQLite database cursor. + env_table_name (str): The environment-specific table name. + page_id (int): The ID of the created page. + variables (Dict[str, str]): The variables used for the GraphQL + mutation, including `path` and `locale`. + + Returns: + None + """ + try: + query = ( + f"UPDATE {env_table_name} " + "SET exp_article_id = ?, status = 'succeeded' " + "WHERE path = ? AND locale = ?" + ) + parameters = ( + page_id, + variables.get("path"), + variables.get("locale"), + ) + + if not parameters[1] or not parameters[2]: + raise ValueError("Missing required variables: 'path' or 'locale'") + + cursor.execute(query, parameters) + except sqlite3.Error as db_error: + raise RuntimeError( + f"Database error while updating record status: {db_error}" + ) from db_error + except ValueError as value_error: + raise ValueError(f"Invalid input: {value_error}") from value_error + + +def execute_graphql_mutation( + api_config: Dict[str, Any], variables: Dict[str, Any], mutation_type: str +) -> Dict[str, Any]: + """ + Executes a GraphQL mutation (create or follow-up). + + Args: + api_config (Dict[str, Any]): API configuration containing the GraphQL + URL, headers, and mutation strings. + variables (Dict[str, Any]): Variables for the GraphQL query. + mutation_type (str): The type of mutation ("create" or "follow_up"). + + Returns: + Dict[str, Any]: The response data from the API call. + + Raises: + ValueError: If the mutation type is not found in the API configuration. + RuntimeError: If the request fails or returns a non-200 status code. 
+    """
+    if mutation_type not in api_config:
+        raise ValueError(f"Invalid mutation type: {mutation_type}")
+
+    payload = {"query": api_config[mutation_type], "variables": variables}
+
+    try:
+        response = requests.post(
+            api_config["graphql_url"],
+            json=payload,
+            headers=api_config["headers"],
+            verify=False,  # nosec
+            timeout=10,
+        )
+        response.raise_for_status()  # Raise an exception for non-2xx responses
+        return response.json()
+    except requests.exceptions.HTTPError as http_error:
+        # The server answered, so its response body can be quoted for context.
+        raise RuntimeError(
+            f"GraphQL mutation failed: {http_error} - {http_error.response.text}"
+        ) from http_error
+    except requests.exceptions.RequestException as request_error:
+        # The request itself failed (connection error, timeout, ...),
+        # so there is no response object to quote.
+        raise RuntimeError(
+            f"GraphQL mutation failed: {request_error}"
+        ) from request_error
+
+
+def handle_follow_up_mutation(
+    api_config: Dict[str, Any],
+    page_id: int,
+    related_data: Dict[str, Any],
+    callback: Callable[[str], None],
+) -> None:
+    """
+    Handles the follow-up GraphQL mutation to save related data for a page.
+
+    Args:
+        api_config (Dict[str, Any]): API configuration containing the GraphQL
+            URL, headers, and mutation strings.
+        page_id (int): The ID of the created page.
+        related_data (Dict[str, Any]): Related data for institutions, legal acts, contacts, etc.
+        callback (Callable[[str], None]): A callback function for logging.
+
+    Returns:
+        None
+    """
+    related_data["pageId"] = page_id
+
+    try:
+        response_data = execute_graphql_mutation(api_config, related_data, "follow_up")
+        if response_data.get("data"):
+            callback(f"Successfully processed related records for pageId: {page_id}")
+        else:
+            callback(f"Failed to process related records for pageId: {page_id}")
+    except RuntimeError as error:
+        callback(f"Error during follow-up mutation for pageId {page_id}: {error}")
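An illustrative sketch, not from the patch itself: the related_data mapping consumed above mirrors the variables of the follow-up mutation; the values below are hypothetical.

    related_data = {
        "institutionInput": [
            {"id": 1, "name": "Example Institution",
             "url": "https://example.org", "isResponsible": True}
        ],
        "legalActInput": [],
        "pageContactInput": [],
        "relatedPagesInput": [],
        "serviceInput": [],
    }
    # handle_follow_up_mutation injects the new page id before sending:
    # related_data["pageId"] = page_id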
+def fetch_related_data(
+    cursor: sqlite3.Cursor, env_table_name: str, article_id: int
+) -> Dict[str, List[Dict[str, Any]]]:
+    """
+    Fetches and formats related data (institutions, legal acts, contacts, etc.)
+    from the database for the follow-up GraphQL mutation.
+
+    Args:
+        cursor (sqlite3.Cursor): The SQLite database cursor.
+        env_table_name (str): The environment-specific table name.
+        article_id (int): The article ID.
+
+    Returns:
+        Dict[str, List[Dict[str, Any]]]: A dictionary containing related data for
+        institutions, legal acts, contacts, related pages, and services.
+    """
+    # Fetch institution data
+    cursor.execute(
+        f"""
+        SELECT id, name, url, isResponsible
+        FROM "{env_table_name}_arva_institution"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    institutions = [
+        {
+            "id": row[0],
+            "name": row[1],
+            "url": row[2],
+            "isResponsible": bool(row[3]),  # Convert to Boolean
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch legal act data
+    cursor.execute(
+        f"""
+        SELECT title, url, legalActType, globalId, groupId, versionStartDate
+        FROM "{env_table_name}_arva_legal_act"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    legal_acts = [
+        {
+            "title": row[0],
+            "url": row[1],
+            "legalActType": row[2],
+            "globalId": row[3],
+            "groupId": row[4],
+            "versionStartDate": row[5],
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch page contact data
+    cursor.execute(
+        f"""
+        SELECT contactId, role, firstName, lastName, company, email, phone
+        FROM "{env_table_name}_arva_page_contact"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    contacts = [
+        {
+            "id": row[0],
+            "role": row[1],
+            "firstName": row[2],
+            "lastName": row[3],
+            "company": row[4],
+            "email": row[5],
+            "phone": row[6],
+        }
+        for row in cursor.fetchall()
+    ]
+
+    # Fetch related page data
+    cursor.execute(
+        f"""
+        SELECT id, title, locale
+        FROM "{env_table_name}_arva_related_pages"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    related_pages = [
+        {"id": row[0], "title": row[1], "locale": row[2]} for row in cursor.fetchall()
+    ]
+
+    # Fetch service data
+    cursor.execute(
+        f"""
+        SELECT id, name, url
+        FROM "{env_table_name}_arva_service"
+        WHERE pageId = ?
+        """,
+        (article_id,),
+    )
+    services = [
+        {"id": row[0], "name": row[1], "url": row[2]} for row in cursor.fetchall()
+    ]
+
+    # Construct and return the related data dictionary
+    return {
+        "institutionInput": institutions,
+        "legalActInput": legal_acts,
+        "pageContactInput": contacts,
+        "relatedPagesInput": related_pages,
+        "serviceInput": services,
+    }
+
+
+def fetch_table_data(
+    cursor: sqlite3.Cursor, env_table_name: str, article_id: int, table_suffix: str
+) -> List[Dict[str, Any]]:
+    """
+    Fetches data from a specific related table.
+
+    Args:
+        cursor (sqlite3.Cursor): The SQLite database cursor.
+        env_table_name (str): The environment-specific table name.
+        article_id (int): The article ID.
+        table_suffix (str): The suffix for the related table (e.g., "arva_institution").
+
+    Returns:
+        List[Dict[str, Any]]: A list of dictionaries representing the rows fetched from the table.
+    """
+    table_name = f"{env_table_name}_{table_suffix}"
+    try:
+        # SQLite placeholders can bind values only, never identifiers, so the
+        # table name is quoted and interpolated while pageId stays a parameter.
+        cursor.execute(
+            f'SELECT * FROM "{table_name}" WHERE pageId = ?',
+            (article_id,),
+        )
+        columns = [col[0] for col in cursor.description]
+        return [dict(zip(columns, row)) for row in cursor.fetchall()]
+    except sqlite3.Error as db_error:
+        raise RuntimeError(
+            f"Error fetching data from table '{table_name}': {db_error}"
+        ) from db_error
+
+
+def get_api_config(bearer_token: str, graphql_url: str) -> Dict[str, Any]:
+    """
+    Creates and returns the API configuration required for making GraphQL requests.
+
+    Args:
+        bearer_token (str): The bearer token used for API authentication.
+        graphql_url (str): The URL of the GraphQL endpoint.
+
+    Returns:
+        Dict[str, Any]: A dictionary containing the API configuration, including:
+            - "graphql_url" (str): The GraphQL endpoint URL.
+            - "headers" (dict): HTTP headers for the request, including the Authorization token.
+            - "create" (str): The GraphQL mutation string for creating a page.
+            - "follow_up" (str): The GraphQL mutation string for follow-up operations.
+    """
+    if not bearer_token or not graphql_url:
+        raise ValueError("Bearer token and GraphQL URL must be provided.")
+
+    headers = {
+        "Authorization": f"Bearer {bearer_token}",
+        "Content-Type": "application/json",
+    }
+    create_mutation, follow_up_mutation = get_graphql_mutations()
+
+    return {
+        "graphql_url": graphql_url,
+        "headers": headers,
+        "create": create_mutation,
+        "follow_up": follow_up_mutation,
+    }
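An illustrative sketch, not from the patch itself, of how get_api_config and execute_graphql_mutation fit together for the "create" mutation; the token, locale, and page fields below are hypothetical.

    api_config = get_api_config("<token>", "https://arva-main.dev.riaint.ee/graphql")
    variables = {
        "content": "Hello", "description": "", "editor": "code",
        "isPrivate": False, "isPublished": False, "locale": "et",
        "path": "demo/page", "tags": ["demo"], "title": "Demo page",
    }
    response_data = execute_graphql_mutation(api_config, variables, "create")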
+def process_records(
+    db: str,
+    env: str,
+    bearer_token: str,
+    graphql_url: str,
+    callback: Callable[[str], None] = print,
+) -> None:
+    """
+    Processes all records in the database, creating pages and handling related data.
+
+    Args:
+        db (str): The database file path.
+        env (str): The environment identifier (e.g., dev, test, prod).
+        bearer_token (str): The authentication token for API requests.
+        graphql_url (str): The GraphQL API URL.
+        callback (Callable[[str], None]): A callback function for logging.
+
+    Returns:
+        None
+    """
+    conn = None
+    cursor = None
+    try:
+        conn, cursor, table_name = db_act.initialize_db_connection(db)
+        api_config = get_api_config(bearer_token, graphql_url)
+        env_table_name = f"{table_name}_{env}"
+
+        rows = fetch_all_records(cursor, env_table_name)
+        if not rows:
+            callback("No records to process.")
+            return
+
+        for row in rows:
+            try:
+                process_record(cursor, env_table_name, api_config, row, callback)
+            except Exception as record_error:  # pylint: disable=broad-except
+                callback(f"Error processing record: {record_error}")
+
+        conn.commit()
+        callback("All records processed successfully.")
+    except Exception as general_error:  # pylint: disable=broad-except
+        callback(f"Error processing records: {general_error}")
+    finally:
+        # Guard against a failed initialize_db_connection, in which case
+        # cursor and conn were never assigned.
+        if cursor is not None:
+            cursor.close()
+        if conn is not None:
+            conn.close()
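An illustrative sketch, not from the patch itself: a complete insert run driven from the helpers, assuming the project database already contains pulled records.

    import helper
    import prog

    ENV = "dev"
    TOKEN = helper.get_arva_token(ENV)
    URL = helper.get_env_url(ENV)
    if URL:
        prog.process_records("myproject.db", ENV, TOKEN, URL, callback=print)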