diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index 1eefb13..7f0b27a 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -1,61 +1,61 @@ -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. - -# This workflow checks out code, performs a Codacy security scan -# and integrates the results with the -# GitHub Advanced Security code scanning feature. For more information on -# the Codacy security scan action usage and parameters, see -# https://github.com/codacy/codacy-analysis-cli-action. -# For more information on Codacy Analysis CLI in general, see -# https://github.com/codacy/codacy-analysis-cli. - -name: Codacy Security Scan - -on: - push: - branches: [ "main" ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ "main" ] - schedule: - - cron: '43 3 * * 0' - -permissions: - contents: read - -jobs: - codacy-security-scan: - permissions: - contents: read # for actions/checkout to fetch code - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status - name: Codacy Security Scan - runs-on: ubuntu-latest - steps: - # Checkout the repository to the GitHub Actions runner - - name: Checkout code - uses: actions/checkout@v3 - - # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis - - name: Run Codacy Analysis CLI - uses: codacy/codacy-analysis-cli-action@d840f886c4bd4edc059706d09c6a1586111c540b - with: - # Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository - # You can also omit the token and run the tools that support default configurations - project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} - verbose: true - output: results.sarif - format: sarif - # Adjust severity of non-security issues - gh-code-scanning-compat: true - # Force 0 exit code to allow SARIF file generation - # This will handover control about PR rejection to the GitHub side - max-allowed-issues: 2147483647 - - # Upload the SARIF file generated in the previous step - - name: Upload SARIF results file - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: results.sarif +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# This workflow checks out code, performs a Codacy security scan +# and integrates the results with the +# GitHub Advanced Security code scanning feature. For more information on +# the Codacy security scan action usage and parameters, see +# https://github.com/codacy/codacy-analysis-cli-action. +# For more information on Codacy Analysis CLI in general, see +# https://github.com/codacy/codacy-analysis-cli. 
+ +name: Codacy Security Scan + +on: + push: + branches: [ "main" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "main" ] + schedule: + - cron: '43 3 * * 0' + +permissions: + contents: read + +jobs: + codacy-security-scan: + permissions: + contents: read # for actions/checkout to fetch code + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status + name: Codacy Security Scan + runs-on: ubuntu-latest + steps: + # Checkout the repository to the GitHub Actions runner + - name: Checkout code + uses: actions/checkout@v3 + + # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis + - name: Run Codacy Analysis CLI + uses: codacy/codacy-analysis-cli-action@d840f886c4bd4edc059706d09c6a1586111c540b + with: + # Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository + # You can also omit the token and run the tools that support default configurations + project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} + verbose: true + output: results.sarif + format: sarif + # Adjust severity of non-security issues + gh-code-scanning-compat: true + # Force 0 exit code to allow SARIF file generation + # This will handover control about PR rejection to the GitHub side + max-allowed-issues: 2147483647 + + # Upload the SARIF file generated in the previous step + - name: Upload SARIF results file + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: results.sarif diff --git a/.gitignore b/.gitignore index 78d8347..e832dd3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,174 +1,174 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
-# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml -.pdm-python -.pdm-build/ - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ - -# User-specific stuff -src/chocolate/static/img/mediaImages/* -src/chocolate/static/img/avatars/* -!src/chocolate/static/img/avatars/defaultUserProfilePic.png - -src/chocolate/*.db -src/chocolate/*.ini -!src/chocolate/empty_config.ini -src/chocolate/send_languages_to_weblate.py -src/chocolate/intro.py +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
+# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# User-specific stuff +src/chocolate/static/img/mediaImages/* +src/chocolate/static/img/avatars/* +!src/chocolate/static/img/avatars/defaultUserProfilePic.png + +src/chocolate/*.db +src/chocolate/*.ini +!src/chocolate/empty_config.ini +src/chocolate/send_languages_to_weblate.py +src/chocolate/intro.py src/chocolate/convert.py diff --git a/.vscode/settings.json b/.vscode/settings.json index e62482d..316c549 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,9 +1,10 @@ -{ - "[python]": { - "editor.formatOnSave": true, - "editor.defaultFormatter": "ms-python.black-formatter" - }, - "ruff.args": [ - "--ignore=E501" - ] +{ + "[python]": { + "editor.formatOnSave": true, + "editor.defaultFormatter": "ms-python.black-formatter" + }, + "ruff.args": [ + "--ignore=E501" + ], + "vscord.enabled": true } \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 88d096f..11214a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,10 @@ -FROM python:3.10-slim-buster - -RUN apt-get update && apt-get install -y ffmpeg && apt-get install -y unrar && apt-get install -y git - -COPY requirements.txt . -RUN pip install -r requirements.txt - -COPY . /chocolate - +FROM python:3.10-slim-buster + +RUN apt-get update && apt-get install -y ffmpeg && apt-get install -y unrar && apt-get install -y git + +COPY requirements.txt . +RUN pip install -r requirements.txt + +COPY . /chocolate + CMD ["python", "/chocolate/app.py"] \ No newline at end of file diff --git a/README.md b/README.md index 7d3fa28..a52f040 100644 --- a/README.md +++ b/README.md @@ -1,212 +1,212 @@ -

- -

- -
- - ![wakatime](https://wakatime.com/badge/user/4cf4132a-4ced-411d-b714-67bdbdc84527/project/ecce3f45-dba9-4e4b-8f78-693c6d237d1c.svg) - [![GitHub release](https://img.shields.io/github/release/ChocolateApp/Chocolate?include_prereleases=&sort=semver&color=blue)](https://github.com/ChocolateApp/Chocolate/releases/) - [![GitHub stars](https://img.shields.io/github/stars/ChocolateApp/Chocolate?style=social&label=Stars&color=blue)](https://github.com/ChocolateApp/Chocolate) - [![GitHub watchers](https://img.shields.io/github/watchers/ChocolateApp/Chocolate?style=social&label=Watchers&color=blue)](https://github.com/ChocolateApp/Chocolate) - [![License](https://img.shields.io/badge/License-MIT-blue)](#license) - [![issues - Chocolate](https://img.shields.io/github/issues/ChocolateApp/Chocolate)](https://github.com/ChocolateApp/Chocolate/issues) - -
- -## About The Project -Chocolate is a free and Open Source media manager.
-It allows you to manage your media collection and organize it in a way that is easy to use and easy to search.
-Pair your popcorn with Chocolate and enjoy your favorite movie!
-It's a free software.
-

-This product uses the TMDB API but is not endorsed or certified by TMDB |


- - - - - -### Built With - -Chocolate is actually made with this technologies: - -* HTML5 -* CSS3 -* Javascript -* Python -* Flask - - - -## Getting Started - -This is what you have to do to get started with Chocolate : - -## Prerequisites - -### Installation - -#### For Linux -* Go to the [latest release](https://github.com/ChocolateApp/Chocolate/releases/latest) -* Download the latest installer named `install.sh` -* Place it where you want -* Run it -* Enjoy ! - -#### For Windows -No installer available, either: -* [Use Docker]( https://github.com/ChocolateApp/Chocolate#for-docker) -* Install manually - * So download the source code and install the dependencies (requirements.txt, ffmpeg and winrar (only for cbr files so books)) - * For ffmpeg and winrar, you have to add them to your PATH - -#### For QNAP -* Go here: [https://www.myqnap.org/product/chocolate81/](https://www.myqnap.org/product/chocolate81/) -* Enjoy ! - -#### For Docker -* Execute `docker pull imprevisible/chocolate` -* Enjoy ! - -### Files organizations - -#### For Movies : -* Create a directory -* Put all your movies in (directly the files or in a subfolder) -* Create a new library and select the directory you created with the specific type -* It's done - -#### For Shows : -* Create a directory where you will put all your shows -* Choose between two ways to organize your shows : - * One directory per show, with directories for each season, and files for each episode - * All files in one directory, for all shows, with a good name that can be analyzed -* Create a new library and select the directory you created with the specific type -* It's done - -#### For Games : -* Create a directory -* Create a directory for each consoles -* For each directory put games for this console -* Some consoles need a bios, go to /static/bios/ - * Create a directory named by the console - * Put in the bios file -* It's done - -#### For Books : -* Create a directory -* Put all your books in with the name that you want -* It's done - -### List of supported console : - * Gameboy - * Gameboy Color - * Gameboy Advance - * Nintendo DS - * Nintendo 64 - * Nintendo Entertainment System - * Super Nintendo Entertainment System - * Sega Master System - * Sega Mega Drive - * Sega Saturn - * Sony Playstation 1 (for .cue and .bin you have to .zip all files) (need a bios) - -### Start Chocolate - -#### For Linux -* execute 'chocolate' in your terminal - -#### For Windows -* Execute app.py - -#### For Docker -/!\ The docker image has some problems, it's not working for now /!\ -* Execute : - * CMD : `docker run -d -v %cd%:/chocolate imprevisible/chocolate` - * Powershell : `docker run -d -v ${PWD}:/chocolate imprevisible/chocolate` - * Linux : `docker run -d -v $(pwd):/chocolate imprevisible/chocolate` - -### Important Informations -* The port of Chocolate is 8888. - - -## Usage -![screencapture-localhost-8500-2022-08-18-18_03_30](https://user-images.githubusercontent.com/69050895/185441919-61db8093-8aa7-49d1-aa58-d04520b9a250.png) -![screencapture-localhost-8500-films-2022-08-18-18_04_53](https://user-images.githubusercontent.com/69050895/185442124-ecf72fe9-344f-4836-b21b-597c4c36c1d0.png) - - - - -## Contributing - -Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**. - -If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement". 
-Don't forget to give the project a star! Thanks again! - -1. Fork the Project -2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`) -3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`) -4. Push to the Branch (`git push origin feature/AmazingFeature`) -5. Open a Pull Request - - -## TO-DO for Chocolate Server -- [ ] A docker image **URGENT** -- [ ] Allow library fusion (for example, if you have two libraries for movies, you can merge them, so only one library will be displayed) -- [ ] Create a plugin system -- [ ] Add the mobile ui of the video player -- [ ] Add a watchtogether system -- [ ] Multiple interface -- [ ] Allow custom css -- [ ] Statistics -- [ ] Custom intro -- [ ] Add a system to search for subtitles (By using OpenSubtitles API [here](https://opensubtitles.stoplight.io/docs/opensubtitles-api/b1eb44d4c8502-open-subtitles-api) ) -- [ ] Send issues directly from the website -- [ ] Add watched movies, and series to TRAKT -- [ ] Add support to trakt -- [ ] Use the GPU to encode videos if possible -- [ ] Change season with the buttons -- [ ] Add logs -- [ ] Design a UI for the path selection instead of a string -- [ ] Use two pages for books on horizontal screen -- [ ] NFO support -- [ ] Allow support of PosgreSQL/MySQL - -### Work in progress -- [ ] Detect series intro and skip them -- [ ] Dev a mobile/TV app with chromecasting, and download -- [ ] Add all audio tracks - - -## Contact - -Official Discord Server - [https://discord.gg/qbWdzuPhZ4](https://discord.gg/qbWdzuPhZ4)
-Project Link: [https://github.com/ChocolateApp/Chocolate](https://github.com/ChocolateApp/Chocolate)
-Impre'visible#2576 - [@romeo_chevrier](https://twitter.com/romeo_chevrier) - impr.visible@gmail.com
- - - -## Acknowledgments - -If you like this project, please consider giving me a star ⭐ to support my work and the futures update of this project. -[![stars - Chocolate](https://img.shields.io/github/stars/ChocolateApp/Chocolate?style=social)](https://github.com/ChocolateApp/Chocolate) - -This tool was made by Impre-visible, some features needed the help of several volunteers, that I thank, you can contact them on this server : [Dev'Area](https://discord.gg/hTmbFePH) - -Original website design from [Mart](https://www.figma.com/@Martbrady) on [figma](https://www.figma.com/community/file/970595453636409922)
-A special thanks to Mathias08 who made it possible to release v1 of Chocolate and MONSTA CARDO !! who made the animated logo ! - -The consoles images are in part from [Jude Coram](https://www.judecoram.com/pixel-art-game-consoles/) the rest are made by me. - -This tool was made with ❤ and ☕ by Impre-visible. - - -## License - -
- This work is licensed under a - - GNU GENERAL PUBLIC LICENSE - -
+

+ +

+ +
+
+ ![wakatime](https://wakatime.com/badge/user/4cf4132a-4ced-411d-b714-67bdbdc84527/project/ecce3f45-dba9-4e4b-8f78-693c6d237d1c.svg)
+ [![GitHub release](https://img.shields.io/github/release/ChocolateApp/Chocolate?include_prereleases=&sort=semver&color=blue)](https://github.com/ChocolateApp/Chocolate/releases/)
+ [![GitHub stars](https://img.shields.io/github/stars/ChocolateApp/Chocolate?style=social&label=Stars&color=blue)](https://github.com/ChocolateApp/Chocolate)
+ [![GitHub watchers](https://img.shields.io/github/watchers/ChocolateApp/Chocolate?style=social&label=Watchers&color=blue)](https://github.com/ChocolateApp/Chocolate)
+ [![License](https://img.shields.io/badge/License-GPL--3.0-blue)](#license)
+ [![issues - Chocolate](https://img.shields.io/github/issues/ChocolateApp/Chocolate)](https://github.com/ChocolateApp/Chocolate/issues)
+
+
+## About The Project
+Chocolate is a free and open-source media manager.
+It allows you to manage your media collection and organize it in a way that is easy to use and easy to search.
+Pair your popcorn with Chocolate and enjoy your favorite movie!
+It's free software.
+

+This product uses the TMDB API but is not endorsed or certified by TMDB.


+
+
+
+
+
+### Built With
+
+Chocolate is made with these technologies:
+
+* HTML5
+* CSS3
+* JavaScript
+* Python
+* Flask
+
+
+## Getting Started
+
+Here is what you need to do to get started with Chocolate:
+
+## Prerequisites
+
+### Installation
+
+#### For Linux
+* Go to the [latest release](https://github.com/ChocolateApp/Chocolate/releases/latest)
+* Download the latest installer, named `install.sh`
+* Place it wherever you want
+* Run it
+* Enjoy!
+
+#### For Windows
+No installer is available; either:
+* [Use Docker](https://github.com/ChocolateApp/Chocolate#for-docker)
+* Install manually
+  * Download the source code and install the dependencies (requirements.txt, ffmpeg, and WinRAR (WinRAR is only needed for .cbr files, i.e. books))
+  * Add ffmpeg and WinRAR to your PATH
+
+#### For QNAP
+* Go here: [https://www.myqnap.org/product/chocolate81/](https://www.myqnap.org/product/chocolate81/)
+* Enjoy!
+
+#### For Docker
+* Execute `docker pull imprevisible/chocolate`
+* Enjoy!
+
+### File organization
+
+#### For Movies:
+* Create a directory
+* Put all your movies in it (either the files directly, or in subfolders)
+* Create a new library, select the directory you created, and pick the matching type
+* That's it
+
+#### For Shows:
+* Create a directory where you will put all your shows
+* Choose between two ways to organize your shows (see the example layout below):
+  * One directory per show, with a directory for each season and a file for each episode
+  * All files in one directory, for all shows, with names clear enough to be parsed
+* Create a new library, select the directory you created, and pick the matching type
+* That's it
+
+#### For Games:
+* Create a directory
+* Create a subdirectory for each console
+* Put each console's games in its subdirectory
+* Some consoles need a BIOS; go to /static/bios/
+  * Create a directory named after the console
+  * Put the BIOS file in it
+* That's it
+
+#### For Books:
+* Create a directory
+* Put all your books in it, named however you like
+* That's it
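+
+To make the first show convention concrete, a show library might look like this (show titles and file names are purely illustrative):
+
+```text
+Shows/
+├── My Show/
+│   ├── Season 1/
+│   │   ├── My Show - S01E01.mkv
+│   │   └── My Show - S01E02.mkv
+│   └── Season 2/
+│       └── My Show - S02E01.mkv
+└── Another Show/
+    └── Season 1/
+        └── Another Show - S01E01.mp4
+```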
+
+### List of supported consoles:
+ * Game Boy
+ * Game Boy Color
+ * Game Boy Advance
+ * Nintendo DS
+ * Nintendo 64
+ * Nintendo Entertainment System
+ * Super Nintendo Entertainment System
+ * Sega Master System
+ * Sega Mega Drive
+ * Sega Saturn
+ * Sony PlayStation 1 (for .cue and .bin files, .zip all the files together) (needs a BIOS)
+
+### Start Chocolate
+
+#### For Linux
+* Execute `chocolate` in your terminal
+
+#### For Windows
+* Execute app.py
+
+#### For Docker
+/!\ The Docker image has some problems and is not working for now /!\
+* Execute:
+  * CMD: `docker run -d -v %cd%:/chocolate imprevisible/chocolate`
+  * PowerShell: `docker run -d -v ${PWD}:/chocolate imprevisible/chocolate`
+  * Linux: `docker run -d -v $(pwd):/chocolate imprevisible/chocolate`
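+
+The repository also provides a docker-compose.yml that mounts the current directory into the container and maps port 8500. If you want to try the Compose setup anyway (the image is currently reported broken, see above), here is a minimal sketch, assuming Docker Compose v2 and that you run it from the repository root:
+
+```sh
+# Pull the image referenced by docker-compose.yml and start the service in the background
+docker compose pull
+docker compose up -d
+
+# Follow the container logs to check that Chocolate started
+# ("chocolate" is the container_name set in docker-compose.yml)
+docker logs -f chocolate
+```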
+
+### Important Information
+* Chocolate listens on port 8888.
+
+
+## Usage
+![screencapture-localhost-8500-2022-08-18-18_03_30](https://user-images.githubusercontent.com/69050895/185441919-61db8093-8aa7-49d1-aa58-d04520b9a250.png)
+![screencapture-localhost-8500-films-2022-08-18-18_04_53](https://user-images.githubusercontent.com/69050895/185442124-ecf72fe9-344f-4836-b21b-597c4c36c1d0.png)
+
+
+## Contributing
+
+Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
+
+If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
+Don't forget to give the project a star! Thanks again!
+
+1. Fork the Project
+2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
+3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
+4. Push to the Branch (`git push origin feature/AmazingFeature`)
+5. Open a Pull Request
+
+
+## TO-DO for Chocolate Server
+- [ ] A working Docker image **URGENT**
+- [ ] Allow library fusion (for example, if you have two movie libraries, you can merge them so only one library is displayed)
+- [ ] Create a plugin system
+- [ ] Add the mobile UI of the video player
+- [ ] Add a watch-together system
+- [ ] Multiple interfaces
+- [ ] Allow custom CSS
+- [ ] Statistics
+- [ ] Custom intro
+- [ ] Add a system to search for subtitles (using the OpenSubtitles API, [here](https://opensubtitles.stoplight.io/docs/opensubtitles-api/b1eb44d4c8502-open-subtitles-api))
+- [ ] Send issues directly from the website
+- [ ] Add watched movies and series to Trakt
+- [ ] Add Trakt support
+- [ ] Use the GPU to encode videos when possible
+- [ ] Change season with the buttons
+- [ ] Add logs
+- [ ] Design a UI for path selection instead of a plain string
+- [ ] Use two pages for books on horizontal screens
+- [ ] NFO support
+- [ ] Allow support of PostgreSQL/MySQL
+
+### Work in progress
+- [ ] Detect series intros and skip them
+- [ ] Develop a mobile/TV app with Chromecast support and downloads
+- [ ] Add all audio tracks
+
+
+## Contact
+
+Official Discord Server - [https://discord.gg/qbWdzuPhZ4](https://discord.gg/qbWdzuPhZ4)
+Project Link: [https://github.com/ChocolateApp/Chocolate](https://github.com/ChocolateApp/Chocolate)
+Impre'visible#2576 - [@romeo_chevrier](https://twitter.com/romeo_chevrier) - impr.visible@gmail.com
+
+
+
+## Acknowledgments
+
+If you like this project, please consider giving it a star ⭐ to support my work and the future updates of this project.
+[![stars - Chocolate](https://img.shields.io/github/stars/ChocolateApp/Chocolate?style=social)](https://github.com/ChocolateApp/Chocolate)
+
+This tool was made by Impre-visible; some features needed the help of several volunteers, whom I thank. You can contact them on this server: [Dev'Area](https://discord.gg/hTmbFePH)
+
+Original website design by [Mart](https://www.figma.com/@Martbrady) on [Figma](https://www.figma.com/community/file/970595453636409922)
+A special thanks to Mathias08, who made it possible to release v1 of Chocolate, and to MONSTA CARDO, who made the animated logo!
+
+The console images are in part from [Jude Coram](https://www.judecoram.com/pixel-art-game-consoles/); the rest were made by me.
+
+This tool was made with ❤ and ☕ by Impre-visible.
+
+
+## License
+
+ This work is licensed under the
+ GNU GENERAL PUBLIC LICENSE
+
diff --git a/docker-compose.yml b/docker-compose.yml index 5870426..bacd3e8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,10 +1,10 @@ -version: '3.9' - -services: - chocolate: - container_name: chocolate - image: imprevisible/chocolate:latest - ports: - - 8500:8500 - volumes: +version: '3.9' + +services: + chocolate: + container_name: chocolate + image: imprevisible/chocolate:latest + ports: + - 8500:8500 + volumes: - ./:/chocolate \ No newline at end of file diff --git a/licence.md b/licence.md index e72bfdd..871ce8e 100644 --- a/licence.md +++ b/licence.md @@ -1,674 +1,674 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. 
A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>. \ No newline at end of file diff --git a/pdm.lock b/pdm.lock index 7c50588..da5f2a6 100644 --- a/pdm.lock +++ b/pdm.lock @@ -1,1074 +1,1085 @@ -# This file is @generated by PDM. -# It is not intended for manual editing. - -[metadata] -groups = ["default", "lint"] -cross_platform = true -static_urls = false -lock_version = "4.3" -content_hash = "sha256:8f61f6604324ac82a52a46619a51f848e997fad28c3c944fa807d323da67d598" - -[[package]] -name = "alembic" -version = "1.11.2" -requires_python = ">=3.7" -summary = "A database migration tool for SQLAlchemy." 
-dependencies = [ - "Mako", - "SQLAlchemy>=1.3.0", - "typing-extensions>=4", -] -files = [ - {file = "alembic-1.11.2-py3-none-any.whl", hash = "sha256:7981ab0c4fad4fe1be0cf183aae17689fe394ff874fd2464adb774396faf0796"}, - {file = "alembic-1.11.2.tar.gz", hash = "sha256:678f662130dc540dac12de0ea73de9f89caea9dbea138f60ef6263149bf84657"}, -] - -[[package]] -name = "argparse" -version = "1.4.0" -summary = "Python command-line parsing library" -files = [ - {file = "argparse-1.4.0-py2.py3-none-any.whl", hash = "sha256:c31647edb69fd3d465a847ea3157d37bed1f95f19760b11a47aa91c04b666314"}, - {file = "argparse-1.4.0.tar.gz", hash = "sha256:62b089a55be1d8949cd2bc7e0df0bddb9e028faefc8c32038cc84862aefdd6e4"}, -] - -[[package]] -name = "babelfish" -version = "0.6.0" -requires_python = ">=3.6,<4.0" -summary = "A module to work with countries and languages" -files = [ - {file = "babelfish-0.6.0-py3-none-any.whl", hash = "sha256:268f1c6279f2a04a66837972e8a9f3dcc68e16f1201eec57d2a4b828a8b41b11"}, - {file = "babelfish-0.6.0.tar.gz", hash = "sha256:2dadfadd1b205ca5fa5dc9fa637f5b7933160a0418684c7c46a7a664033208a2"}, -] - -[[package]] -name = "beautifulsoup4" -version = "4.12.2" -requires_python = ">=3.6.0" -summary = "Screen-scraping library" -dependencies = [ - "soupsieve>1.2", -] -files = [ - {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, - {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, -] - -[[package]] -name = "black" -version = "23.7.0" -requires_python = ">=3.8" -summary = "The uncompromising code formatter." -dependencies = [ - "click>=8.0.0", - "mypy-extensions>=0.4.3", - "packaging>=22.0", - "pathspec>=0.9.0", - "platformdirs>=2", -] -files = [ - {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, - {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, - {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, - {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, - {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, -] - -[[package]] -name = "certifi" -version = "2023.7.22" -requires_python = ">=3.6" -summary = "Python package for providing Mozilla's CA Bundle." -files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.2.0" -requires_python = ">=3.7.0" -summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, -] - -[[package]] -name = "click" -version = "8.1.6" -requires_python = ">=3.7" -summary = "Composable command line interface toolkit" -dependencies = [ - "colorama; platform_system == \"Windows\"", -] -files = [ - {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, - {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, -] - -[[package]] -name = "colorama" -version = "0.4.5" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -summary = "Cross-platform colored terminal text." 
-files = [ - {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, - {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, -] - -[[package]] -name = "deep-translator" -version = "1.10.1" -requires_python = ">=3.7,<4.0" -summary = "A flexible free and unlimited python tool to translate between different languages in a simple way using multiple translators" -dependencies = [ - "beautifulsoup4<5.0.0,>=4.9.1", - "requests<3.0.0,>=2.23.0", -] -files = [ - {file = "deep_translator-1.10.1-py3-none-any.whl", hash = "sha256:8581e93c4d33746cd6e662607fefb26c457eae9b197de0648e3f25f324ef6534"}, - {file = "deep_translator-1.10.1.tar.gz", hash = "sha256:e99438dab70e3bebcda932e3f5e86fd3d32b43f87d66ed9f8b6816db146f1d9f"}, -] - -[[package]] -name = "deezer-python" -version = "6.1.0" -requires_python = ">=3.8,<4.0" -summary = "A friendly wrapper library for the Deezer API" -dependencies = [ - "requests>=2.18", -] -files = [ - {file = "deezer_python-6.1.0-py3-none-any.whl", hash = "sha256:ea15381cc05f978654c73097c9b1e40a5ae19ce6d5332303a2c4c5e047f73085"}, - {file = "deezer_python-6.1.0.tar.gz", hash = "sha256:84930fda42a4b91ffa287296fb92669b81c73bd5d69437801f2acc6575ca812e"}, -] - -[[package]] -name = "ebooklib" -version = "0.18" -summary = "Ebook library which can handle EPUB2/EPUB3 and Kindle format" -dependencies = [ - "lxml", - "six", -] -files = [ - {file = "EbookLib-0.18.tar.gz", hash = "sha256:38562643a7bc94d9bf56e9930b4927e4e93b5d1d0917f697a6454db5a1c1a533"}, -] - -[[package]] -name = "flask" -version = "2.2.2" -requires_python = ">=3.7" -summary = "A simple framework for building complex web applications." -dependencies = [ - "Jinja2>=3.0", - "Werkzeug>=2.2.2", - "click>=8.0", - "itsdangerous>=2.0", -] -files = [ - {file = "Flask-2.2.2-py3-none-any.whl", hash = "sha256:b9c46cc36662a7949f34b52d8ec7bb59c0d74ba08ba6cb9ce9adc1d8676d9526"}, - {file = "Flask-2.2.2.tar.gz", hash = "sha256:642c450d19c4ad482f96729bd2a8f6d32554aa1e231f4f6b4e7e5264b16cca2b"}, -] - -[[package]] -name = "flask-cors" -version = "3.0.10" -summary = "A Flask extension adding a decorator for CORS support" -dependencies = [ - "Flask>=0.9", - "Six", -] -files = [ - {file = "Flask-Cors-3.0.10.tar.gz", hash = "sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de"}, - {file = "Flask_Cors-3.0.10-py2.py3-none-any.whl", hash = "sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438"}, -] - -[[package]] -name = "flask-login" -version = "0.6.2" -requires_python = ">=3.7" -summary = "User authentication and session management for Flask." -dependencies = [ - "Flask>=1.0.4", - "Werkzeug>=1.0.1", -] -files = [ - {file = "Flask-Login-0.6.2.tar.gz", hash = "sha256:c0a7baa9fdc448cdd3dd6f0939df72eec5177b2f7abe6cb82fc934d29caac9c3"}, - {file = "Flask_Login-0.6.2-py3-none-any.whl", hash = "sha256:1ef79843f5eddd0f143c2cd994c1b05ac83c0401dc6234c143495af9a939613f"}, -] - -[[package]] -name = "flask-migrate" -version = "4.0.4" -requires_python = ">=3.6" -summary = "SQLAlchemy database migrations for Flask applications using Alembic." 
-dependencies = [ - "Flask-SQLAlchemy>=1.0", - "Flask>=0.9", - "alembic>=1.9.0", -] -files = [ - {file = "Flask-Migrate-4.0.4.tar.gz", hash = "sha256:73293d40b10ac17736e715b377e7b7bde474cb8105165d77474df4c3619b10b3"}, - {file = "Flask_Migrate-4.0.4-py3-none-any.whl", hash = "sha256:77580f27ab39bc68be4906a43c56d7674b45075bc4f883b1d0b985db5164d58f"}, -] - -[[package]] -name = "flask-sqlalchemy" -version = "3.0.3" -requires_python = ">=3.7" -summary = "Add SQLAlchemy support to your Flask application." -dependencies = [ - "Flask>=2.2", - "SQLAlchemy>=1.4.18", -] -files = [ - {file = "Flask-SQLAlchemy-3.0.3.tar.gz", hash = "sha256:2764335f3c9d7ebdc9ed6044afaf98aae9fa50d7a074cef55dde307ec95903ec"}, - {file = "Flask_SQLAlchemy-3.0.3-py3-none-any.whl", hash = "sha256:add5750b2f9cd10512995261ee2aa23fab85bd5626061aa3c564b33bb4aa780a"}, -] - -[[package]] -name = "get-video-properties" -version = "0.1.1" -summary = "Get video properties" -files = [ - {file = "get_video_properties-0.1.1-py3-none-any.whl", hash = "sha256:04d4f478a5211917e2a7e87ddfcb1c17734cddf8374494c3993bf825b7ad4192"}, -] - -[[package]] -name = "gitdb" -version = "4.0.10" -requires_python = ">=3.7" -summary = "Git Object Database" -dependencies = [ - "smmap<6,>=3.0.1", -] -files = [ - {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, - {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, -] - -[[package]] -name = "gitpython" -version = "3.1.31" -requires_python = ">=3.7" -summary = "GitPython is a Python library used to interact with Git repositories" -dependencies = [ - "gitdb<5,>=4.0.1", -] -files = [ - {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, - {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, -] - -[[package]] -name = "gputil" -version = "1.4.0" -summary = "GPUtil is a Python module for getting the GPU status from NVIDA GPUs using nvidia-smi." 
-files = [ - {file = "GPUtil-1.4.0.tar.gz", hash = "sha256:099e52c65e512cdfa8c8763fca67f5a5c2afb63469602d5dcb4d296b3661efb9"}, -] - -[[package]] -name = "greenlet" -version = "2.0.2" -requires_python = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -summary = "Lightweight in-process concurrent programming" -files = [ - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, - {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, - {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, - {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, -] - -[[package]] -name = "guessit" -version = "3.7.1" -summary = "GuessIt - a library for guessing information from video filenames." 
-dependencies = [ - "babelfish>=0.6.0", - "python-dateutil", - "rebulk>=3.2.0", -] -files = [ - {file = "guessit-3.7.1-py3-none-any.whl", hash = "sha256:c3be280ee8ec581a45ca6a654a92e317bf89567fdc55e7167452226f4f5b8b38"}, - {file = "guessit-3.7.1.tar.gz", hash = "sha256:2c18d982ee6db30db5d59557add0324a2b49bf3940a752947510632a2b58a3c1"}, -] - -[[package]] -name = "idna" -version = "3.4" -requires_python = ">=3.5" -summary = "Internationalized Domain Names in Applications (IDNA)" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "imagehash" -version = "4.3.1" -summary = "Image Hashing library" -dependencies = [ - "PyWavelets", - "numpy", - "pillow", - "scipy", -] -files = [ - {file = "ImageHash-4.3.1-py2.py3-none-any.whl", hash = "sha256:5ad9a5cde14fe255745a8245677293ac0d67f09c330986a351f34b614ba62fb5"}, - {file = "ImageHash-4.3.1.tar.gz", hash = "sha256:7038d1b7f9e0585beb3dd8c0a956f02b95a346c0b5f24a9e8cc03ebadaf0aa70"}, -] - -[[package]] -name = "itsdangerous" -version = "2.1.2" -requires_python = ">=3.7" -summary = "Safely pass data to untrusted environments and back." -files = [ - {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, - {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, -] - -[[package]] -name = "jinja2" -version = "3.1.2" -requires_python = ">=3.7" -summary = "A very fast and expressive template engine." -dependencies = [ - "MarkupSafe>=2.0", -] -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[[package]] -name = "levenshtein" -version = "0.20.9" -requires_python = ">=3.6" -summary = "Python extension for computing string edit distances and similarities." 
-dependencies = [ - "rapidfuzz<3.0.0,>=2.3.0", -] -files = [ - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:105c239ec786750cd5136991c58196b440cc39b6acf3ec8227f6562c9a94e4b9"}, - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f7728bea7fe6dc55ceecde0dcda4287e74fe3b6733ad42530f46aaa8d2f81d0"}, - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc7eca755c13c92814c8cce8175524cf764ce38f39228b602f59eac58cfdc51a"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8a552e79d053dc1324fb90d342447fd4e15736f4cbc5363b6fbd5577f53dce9"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5474b2681ee0b7944fb1e7fe281cd44e2dfe75b03ba4558dca49c96fa0861b62"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:56e132c203b0dd8fc72a33e791c39ad0d5a25bcf24b130a1e202abbf489a3e75"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3badc94708ac05b405e795fde58a53272b90a9ee6099ecd54a345658b7b812e1"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48b9b3ae095b14dad7bc4bd219c7cd9113a7aa123a033337c85b00fe2ed565d3"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0d3a1f7328c91caeb1f857ddd2787e3f19d60cc2c688339d249ca8841da61454"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef67c50428c99caf67d31bd209da21d9378da5f0cc3ad4f7bafb6caa78aee6f2"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:47f6d1592c0891f7355e38a302becd233336ca2f55f9a8be3a8635f946a6784f"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2891019740e874f05e0349e9f27b6af8ad837b1612f42e9c90c296d54d1404fd"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c554704eec4f4ba742febdcc79a85491f8f9a1d493cb103bb2af18536d6cf122"}, - {file = "Levenshtein-0.20.9-cp310-cp310-win32.whl", hash = "sha256:7628e356b3f9c78ad7272c3b9137f0641a1368849e749ff6f2c8fe372795806b"}, - {file = "Levenshtein-0.20.9-cp310-cp310-win_amd64.whl", hash = "sha256:ba2bafe3511194a37044cae4e7d328cca70657933052691c37eba2ca428a379d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7605a94145198d19fdaaa7e29c0f8a56ad719b12386f3ae8cd8ed4cb9fa6c2e4"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:29db4dabfad2ddf33c7986eb6fd525c7587cca4c4d9e187365cff0a5281f5a35"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:965336c1772a4fc5fb2686a2a0bfaf3455dced96f19f50f278da8bc139076d31"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67235753035ac898d6475c0b29540521018db2e0027a3c1deb9aa0af0a84fd74"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:120dca58136aee3d8c7b190e30db7b6a6eb9579ea5712df84ad076a389801743"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6496ea66a6f755e48c0d82f1eee396d16edcd5592d4b3677d26fa789a636a728"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c0af20327acc2c904d11611cb3a0d8d17f80c279a12e0b84189eafc35297186d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d2f891ef53afbab6cf2eeb92ff13151884d17dc80a2d6d3c7ae74d7738b772"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2ab9c72380582bf4745d1c5b055b1df0c85f7a980a04bd7603a855dd91478c0f"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6de13be3eb5ac48053fb1635a7b4daa936b9114ad4b264942e9eb709fcaa41dd"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9fc296860588251d8d72b4f4637cca4eef7351e042a7a23d44e6385aef1e160"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:35777b20fe35858248c22da37984469e6dd1278f55d17c53378312853d5d683d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b9e0642ddb4c431f77c38cec9edbd0317e26c3f37d072ccf281ab58926dce69"}, - {file = "Levenshtein-0.20.9-cp311-cp311-win32.whl", hash = "sha256:f88ec322d86d3cc9d3936dbf6b421ad813950c2658599d48ac4ede59f2a6047e"}, - {file = "Levenshtein-0.20.9-cp311-cp311-win_amd64.whl", hash = "sha256:2907a6888455f9915d5b656f5d058f63eaf6063b2c7f0f1ff6bc05706ae5bc39"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f674cc75f127692525563155e500a3fa16aaf24dafd33a9bcda46e2979f793a1"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a34e3fd21acb31fcd29a0c8353dca74dfbb59957210a6f142505907a9dff3d59"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0ddddf2beafd1a2e17a87f80be562a7f7478e6098ccfc15de4c879972dfa2f9"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9649af1a896a4a7fc7f6f1fd093e8a92f463297f56c7bd0f8d7d16dfabeb236d"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d7bd7f25336849027fbe5ed32b6ffd404436727d78a014e348dcd17347c73fd8"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0371d996ae81089296f42b6e886c7bf138d1cb0f002b0c724a9e5d689b29b5a0"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e00e2fda9f225b5f4537647f6195cf220d468532739d3390eaf082b1d76c87"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1600f5ebe2f2aebf13e88cf488ec2e5ce25f7a42b5846335018693baf4ea63bd"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bcd59fcf06aaedda98da185ec289dc2c2c9922ce789f6a9c101709d4a22cac9"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:1549e307028fa5c3a8cf28ae8bcb1f6072df2abf7f36b9d7adf7fd60690fe372"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:795f2e95d09a33c66c73cd49be3ee632fb4b8c41be72c0cb8df29a329ce7d111"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:726bfb361d3b6786bea31392752f0ffcca568db7dc3f1e274f1b529489b8ad05"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0e0fd315132786375de532355fa06b2f11c4b4af5784b7e064dc54b6ee0c3281"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0674bc0549d5ea9edb934b3b03a160a116cc410feb5739a51f9c4f618ee674e3"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1ef8f3ecdfca5d6f0538226338d58617270439a1cc9b6cacb30a388984bb1608"}, - {file = "Levenshtein-0.20.9.tar.gz", hash = "sha256:70a8ad5e28bb76d87da1eb3f31de940836596547d6d01317c2289f5b7cd0b0ea"}, -] - -[[package]] -name = "logging" -version = "0.4.9.6" -summary = "A logging module for Python" -files = [ - {file = "logging-0.4.9.6.tar.gz", hash = "sha256:26f6b50773f085042d301085bd1bf5d9f3735704db9f37c1ce6d8b85c38f2417"}, -] - -[[package]] -name = "lxml" -version = "4.9.3" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" -summary = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -files = [ - {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, - {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, - {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, - {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, - {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, - {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, - {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, - {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, - {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = 
"sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, - {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, - {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, - {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, - {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, - {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, - {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, - {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, - {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, - {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, -] - -[[package]] -name = "mako" -version = "1.2.4" 
-requires_python = ">=3.7" -summary = "A super-fast templating language that borrows the best ideas from the existing templating languages." -dependencies = [ - "MarkupSafe>=0.9.2", -] -files = [ - {file = "Mako-1.2.4-py3-none-any.whl", hash = "sha256:c97c79c018b9165ac9922ae4f32da095ffd3c4e6872b45eded42926deea46818"}, - {file = "Mako-1.2.4.tar.gz", hash = "sha256:d60a3903dc3bb01a18ad6a89cdbe2e4eadc69c0bc8ef1e3773ba53d44c3f7a34"}, -] - -[[package]] -name = "markupsafe" -version = "2.1.3" -requires_python = ">=3.7" -summary = "Safely add untrusted strings to HTML/XML markup." -files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = 
"sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, -] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -requires_python = ">=3.5" -summary = "Type system extensions for programs checked with the mypy type checker." -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "natsort" -version = "8.4.0" -requires_python = ">=3.7" -summary = "Simple yet flexible natural sorting in Python." -files = [ - {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, - {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, -] - -[[package]] -name = "numpy" -version = "1.25.2" -requires_python = ">=3.9" -summary = "Fundamental package for array computing in Python" -files = [ - {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"}, - {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"}, - {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"}, - {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"}, - {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"}, - {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"}, - {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"}, - {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"}, - {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"}, - {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"}, - {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"}, - {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"}, - {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"}, - {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"}, - {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"}, - {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"}, - {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"}, - {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"}, -] - -[[package]] -name = "opencv-python" -version = "4.5.5.64" -requires_python = ">=3.6" -summary = "Wrapper package for OpenCV python bindings." -dependencies = [ - "numpy>=1.14.5; python_version >= \"3.7\"", - "numpy>=1.17.3; python_version >= \"3.8\"", - "numpy>=1.19.3; python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\"", - "numpy>=1.19.3; python_version >= \"3.9\"", - "numpy>=1.21.2; python_version >= \"3.10\"", - "numpy>=1.21.2; python_version >= \"3.6\" and platform_system == \"Darwin\" and platform_machine == \"arm64\"", -] -files = [ - {file = "opencv-python-4.5.5.64.tar.gz", hash = "sha256:f65de0446a330c3b773cd04ba10345d8ce1b15dcac3f49770204e37602d0b3f7"}, - {file = "opencv_python-4.5.5.64-cp36-abi3-macosx_10_15_x86_64.whl", hash = "sha256:a512a0c59b6fec0fac3844b2f47d6ecb1a9d18d235e6c5491ce8dbbe0663eae8"}, - {file = "opencv_python-4.5.5.64-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6138b6903910e384067d001763d40f97656875487381aed32993b076f44375"}, - {file = "opencv_python-4.5.5.64-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b293ced62f4360d9f11cf72ae7e9df95320ff7bf5b834d87546f844e838c0c35"}, - {file = "opencv_python-4.5.5.64-cp36-abi3-win32.whl", hash = "sha256:6247e584813c00c3b9ed69a795da40d2c153dc923d0182e957e1c2f00a554ac2"}, - {file = "opencv_python-4.5.5.64-cp36-abi3-win_amd64.whl", hash = "sha256:408d5332550287aa797fd06bef47b2dfed163c6787668cc82ef9123a9484b56a"}, - {file = "opencv_python-4.5.5.64-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:7787bb017ae93d5f9bb1b817ac8e13e45dd193743cb648498fcab21d00cf20a3"}, -] - -[[package]] -name = "overrides" -version = "7.4.0" -requires_python = ">=3.6" -summary = "A decorator to automatically detect mismatch when overriding a method." -files = [ - {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, - {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, -] - -[[package]] -name = "packaging" -version = "23.1" -requires_python = ">=3.7" -summary = "Core utilities for Python packages" -files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, -] - -[[package]] -name = "pathspec" -version = "0.11.2" -requires_python = ">=3.7" -summary = "Utility library for gitignore style pattern matching of file paths." 
-files = [ - {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, - {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, -] - -[[package]] -name = "pillow" -version = "9.5.0" -requires_python = ">=3.7" -summary = "Python Imaging Library (Fork)" -files = [ - {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, - {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, - {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, - {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, - {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, - {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, - {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, - {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, - {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, - {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, - {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, - {file 
= "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, - {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, - {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, - {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, - {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, -] - -[[package]] -name = "platformdirs" -version = "3.10.0" -requires_python = ">=3.7" -summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-files = [ - {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, - {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, -] - -[[package]] -name = "pyarr" -version = "5.0.0" -requires_python = ">=3.9,<4.0" -summary = "Synchronous Sonarr, Radarr, Lidarr and Readarr API's for Python" -dependencies = [ - "overrides<8.0.0,>=7.3.1", - "requests<3.0.0,>=2.28.2", - "types-requests<3.0.0.0,>=2.28.11.17", -] -files = [ - {file = "pyarr-5.0.0-py3-none-any.whl", hash = "sha256:6a961ee6789afa8962c0edd99a87093c7a7421d7419e1c752fa03a61e6233a51"}, - {file = "pyarr-5.0.0.tar.gz", hash = "sha256:7b115b5fd81a3715f75b5ecdf42943d21eedeaea6f02514fa22ab5445f4650d2"}, -] - -[[package]] -name = "pycountry" -version = "22.3.5" -requires_python = ">=3.6, <4" -summary = "ISO country, subdivision, language, currency and script definitions and their translations" -dependencies = [ - "setuptools", -] -files = [ - {file = "pycountry-22.3.5.tar.gz", hash = "sha256:b2163a246c585894d808f18783e19137cb70a0c18fb36748dc01fc6f109c1646"}, -] - -[[package]] -name = "pymupdf" -version = "1.22.5" -requires_python = ">=3.7" -summary = "Python bindings for the PDF toolkit and renderer MuPDF" -files = [ - {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:640b8e4cb116dd87a3c854e49808a4f63625e663a7bc5b1efc971db5b4775367"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:17efbbf0e2d99d24cfc302fac512928eb294f10b7b67d597d04dafd012812e4e"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc9b9bf0f2beea3911750d2d66247608be8cbad33b7a050cacec9e4c105a1ca"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7734a32a91eea4b502b8f9d2915cdba0a372226e14fb983876d763110dcefef"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-win32.whl", hash = "sha256:c2fd70ca9961f7871810dce1b7d0a42a69eb8ff2d786621123952bd505a6867e"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-win_amd64.whl", hash = "sha256:add310c96df6933cfb4ce3821c9c7b5c133e8aa609a4c9416e1c7af546163488"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:017aaba511526facfc928e9d95d2c10d28a2821b05b9039bf422031a7da8584e"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe5e44a14864d921fb96669a82f9635846806176f77f1d73c61feb84ebf4d84"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e74d766f79e41e10c51865233042ab2cc4612ca7942812dca0603f4d0f8f73d"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8175452fcc99a0af6429d8acd87682a3a70c5879d73532c7327f71ce508a35"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-win32.whl", hash = "sha256:42f59f4999d7f8b35c850050bd965e98c081a7d9b92d5f9dcf30203b30d06876"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-win_amd64.whl", hash = "sha256:3d71c47aa14b73f2df7d03be8c547a05df6c6898d8c63a0f752b26f206eefd3c"}, - {file = "PyMuPDF-1.22.5.tar.gz", hash = "sha256:5ec8d5106752297529d0d68d46cfc4ce99914aabd99be843f1599a1842d63fe9"}, -] - -[[package]] -name = "pypdf2" -version = "3.0.1" -requires_python = ">=3.6" -summary = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" -files = [ - {file = "PyPDF2-3.0.1.tar.gz", hash = 
"sha256:a74408f69ba6271f71b9352ef4ed03dc53a31aa404d29b5d31f53bfecfee1440"}, - {file = "pypdf2-3.0.1-py3-none-any.whl", hash = "sha256:d16e4205cfee272fbdc0568b68d82be796540b1537508cef59388f839c191928"}, -] - -[[package]] -name = "pypresence" -version = "4.2.1" -requires_python = ">=3.5" -summary = "Discord RPC client written in Python" -files = [ - {file = "pypresence-4.2.1-py2.py3-none-any.whl", hash = "sha256:12197b5f51c21e3e555b17f85d3e55023f4ad83b6fff72cd6387659ffd484a02"}, - {file = "pypresence-4.2.1.tar.gz", hash = "sha256:691daf98c8189fd216d988ebfc67779e0f664211512d9843f37ab0d51d4de066"}, -] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -summary = "Extensions to the standard Python datetime module" -dependencies = [ - "six>=1.5", -] -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[[package]] -name = "python-levenshtein" -version = "0.20.9" -requires_python = ">=3.6" -summary = "Python extension for computing string edit distances and similarities." -dependencies = [ - "Levenshtein==0.20.9", -] -files = [ - {file = "python-Levenshtein-0.20.9.tar.gz", hash = "sha256:4c507b1e26de29374153982fa477cea741edf095d892773343b4961beacac834"}, - {file = "python_Levenshtein-0.20.9-py3-none-any.whl", hash = "sha256:2a6f8c97ba554d7399e0b450e1fce5d90d6354b1c1762e419671de27f25736c5"}, -] - -[[package]] -name = "pywavelets" -version = "1.4.1" -requires_python = ">=3.8" -summary = "PyWavelets, wavelet transform module" -dependencies = [ - "numpy>=1.17.3", -] -files = [ - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, - {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, -] - -[[package]] -name = "rapidfuzz" -version = "2.15.1" -requires_python = ">=3.7" -summary = "rapid fuzzy string matching" -files = [ - {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fc0bc259ebe3b93e7ce9df50b3d00e7345335d35acbd735163b7c4b1957074d3"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d59fb3a410d253f50099d7063855c2b95df1ef20ad93ea3a6b84115590899f25"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c525a3da17b6d79d61613096c8683da86e3573e807dfaecf422eea09e82b5ba6"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4deae6a918ecc260d0c4612257be8ba321d8e913ccb43155403842758c46fbe"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2577463d10811386e704a3ab58b903eb4e2a31b24dfd9886d789b0084d614b01"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f67d5f56aa48c0da9de4ab81bffb310683cf7815f05ea38e5aa64f3ba4368339"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7927722ff43690e52b3145b5bd3089151d841d350c6f8378c3cfac91f67573a"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6534afc787e32c4104f65cdeb55f6abe4d803a2d0553221d00ef9ce12788dcde"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d0ae6ec79a1931929bb9dd57bc173eb5ba4c7197461bf69e3a34b6dd314feed2"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:be7ccc45c4d1a7dfb595f260e8022a90c6cb380c2a346ee5aae93f85c96d362b"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8ba013500a2b68c64b2aecc5fb56a2dad6c2872cf545a0308fd044827b6e5f6a"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4d9f7d10065f657f960b48699e7dddfce14ab91af4bab37a215f0722daf0d716"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7e24a1b802cea04160b3fccd75d2d0905065783ebc9de157d83c14fb9e1c6ce2"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-win32.whl", hash = "sha256:dffdf03499e0a5b3442951bb82b556333b069e0661e80568752786c79c5b32de"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d150d90a7c6caae7962f29f857a4e61d42038cfd82c9df38508daf30c648ae7"}, - {file = "rapidfuzz-2.15.1-cp310-cp310-win_arm64.whl", hash = "sha256:87c30e9184998ff6eb0fa9221f94282ce7c908fd0da96a1ef66ecadfaaa4cdb7"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6986413cb37035eb796e32f049cbc8c13d8630a4ac1e0484e3e268bb3662bd1b"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a72f26e010d4774b676f36e43c0fc8a2c26659efef4b3be3fd7714d3491e9957"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b5cd54c98a387cca111b3b784fc97a4f141244bbc28a92d4bde53f164464112e"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da7fac7c3da39f93e6b2ebe386ed0ffe1cefec91509b91857f6e1204509e931f"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f976e76ac72f650790b3a5402431612175b2ac0363179446285cb3c901136ca9"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:abde47e1595902a490ed14d4338d21c3509156abb2042a99e6da51f928e0c117"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca8f1747007a3ce919739a60fa95c5325f7667cccf6f1c1ef18ae799af119f5e"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c35da09ab9797b020d0d4f07a66871dfc70ea6566363811090353ea971748b5a"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3a769ca7580686a66046b77df33851b3c2d796dc1eb60c269b68f690f3e1b65"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d50622efefdb03a640a51a6123748cd151d305c1f0431af762e833d6ffef71f0"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b7461b0a7651d68bc23f0896bffceea40f62887e5ab8397bf7caa883592ef5cb"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:074ee9e17912e025c72a5780ee4c7c413ea35cd26449719cc399b852d4e42533"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7025fb105a11f503943f17718cdb8241ea3bb4d812c710c609e69bead40e2ff0"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-win32.whl", hash = "sha256:2084d36b95139413cef25e9487257a1cc892b93bd1481acd2a9656f7a1d9930c"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:5a738fcd24e34bce4b19126b92fdae15482d6d3a90bd687fd3d24ce9d28ce82d"}, - {file = "rapidfuzz-2.15.1-cp311-cp311-win_arm64.whl", hash = "sha256:dc3cafa68cfa54638632bdcadf9aab89a3d182b4a3f04d2cad7585ed58ea8731"}, - {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b89d1126be65c85763d56e3b47d75f1a9b7c5529857b4d572079b9a636eaa8a7"}, - {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7460e91168229768be882ea365ba0ac7da43e57f9416e2cfadc396a7df3c2"}, - {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c33c03e7092642c38f8a15ca2d8fc38da366f2526ec3b46adf19d5c7aa48ba"}, - {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040faca2e26d9dab5541b45ce72b3f6c0e36786234703fc2ac8c6f53bb576743"}, - {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:6e2a3b23e1e9aa13474b3c710bba770d0dcc34d517d3dd6f97435a32873e3f28"}, - {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e597b9dfd6dd180982684840975c458c50d447e46928efe3e0120e4ec6f6686"}, - {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d14752c9dd2036c5f36ebe8db5f027275fa7d6b3ec6484158f83efb674bab84e"}, - {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558224b6fc6124d13fa32d57876f626a7d6188ba2a97cbaea33a6ee38a867e31"}, - {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c89cfa88dc16fd8c9bcc0c7f0b0073f7ef1e27cceb246c9f5a3f7004fa97c4d"}, - {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:509c5b631cd64df69f0f011893983eb15b8be087a55bad72f3d616b6ae6a0f96"}, - {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:0f73a04135a03a6e40393ecd5d46a7a1049d353fc5c24b82849830d09817991f"}, - {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c99d53138a2dfe8ada67cb2855719f934af2733d726fbf73247844ce4dd6dd5"}, - {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f01fa757f0fb332a1f045168d29b0d005de6c39ee5ce5d6c51f2563bb53c601b"}, - {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60368e1add6e550faae65614844c43f8a96e37bf99404643b648bf2dba92c0fb"}, - {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785744f1270828cc632c5a3660409dee9bcaac6931a081bae57542c93e4d46c4"}, - {file = "rapidfuzz-2.15.1.tar.gz", hash = "sha256:d62137c2ca37aea90a11003ad7dc109c8f1739bfbe5a9a217f3cdb07d7ac00f6"}, -] - -[[package]] -name = "rarfile" -version = "4.0" -summary = "RAR archive reader for Python" -files = [ - {file = "rarfile-4.0-py3-none-any.whl", hash = "sha256:1094869119012f95c31a6f22cc3a9edbdca61861b805241116adbe2d737b68f8"}, - {file = "rarfile-4.0.tar.gz", hash = "sha256:67548769229c5bda0827c1663dce3f54644f9dbfba4ae86d4da2b2afd3e602a1"}, -] - -[[package]] -name = "rebulk" -version = "3.2.0" -summary = "Rebulk - Define simple search patterns in bulk to perform advanced matching on any string." -files = [ - {file = "rebulk-3.2.0-py3-none-any.whl", hash = "sha256:6bc31ae4b37200623c5827d2f539f9ec3e52b50431322dad8154642a39b0a53e"}, - {file = "rebulk-3.2.0.tar.gz", hash = "sha256:0d30bf80fca00fa9c697185ac475daac9bde5f646ce3338c9ff5d5dc1ebdfebc"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -requires_python = ">=3.7" -summary = "Python HTTP for Humans." -dependencies = [ - "certifi>=2017.4.17", - "charset-normalizer<4,>=2", - "idna<4,>=2.5", - "urllib3<3,>=1.21.1", -] -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[[package]] -name = "scipy" -version = "1.9.3" -requires_python = ">=3.8" -summary = "Fundamental algorithms for scientific computing in Python" -dependencies = [ - "numpy<1.26.0,>=1.18.5", -] -files = [ - {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, - {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, - {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, - {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, - {file = 
"scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, - {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, - {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, -] - -[[package]] -name = "setuptools" -version = "68.0.0" -requires_python = ">=3.7" -summary = "Easily download, build, install, upgrade, and uninstall Python packages" -files = [ - {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, - {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, -] - -[[package]] -name = "six" -version = "1.16.0" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -summary = "Python 2 and 3 compatibility utilities" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "smmap" -version = "5.0.0" -requires_python = ">=3.6" -summary = "A pure Python implementation of a sliding window memory map manager" -files = [ - {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, - {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, -] - -[[package]] -name = "soupsieve" -version = "2.4.1" -requires_python = ">=3.7" -summary = "A modern CSS selector implementation for Beautiful Soup." 
-files = [ - {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, - {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, -] - -[[package]] -name = "sqlalchemy" -version = "1.4.45" -requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -summary = "Database Abstraction Library" -dependencies = [ - "greenlet!=0.4.17; python_version >= \"3\" and (platform_machine == \"aarch64\" or (platform_machine == \"ppc64le\" or (platform_machine == \"x86_64\" or (platform_machine == \"amd64\" or (platform_machine == \"AMD64\" or (platform_machine == \"win32\" or platform_machine == \"WIN32\"))))))", -] -files = [ - {file = "SQLAlchemy-1.4.45-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:ca152ffc7f0aa069c95fba46165030267ec5e4bb0107aba45e5e9e86fe4d9363"}, - {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06055476d38ed7915eeed22b78580556d446d175c3574a01b9eb04d91f3a8b2e"}, - {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:081e2a2d75466353c738ca2ee71c0cfb08229b4f9909b5fa085f75c48d021471"}, - {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96821d806c0c90c68ce3f2ce6dd529c10e5d7587961f31dd5c30e3bfddc4545d"}, - {file = "SQLAlchemy-1.4.45-cp310-cp310-win32.whl", hash = "sha256:c8051bff4ce48cbc98f11e95ac46bfd1e36272401070c010248a3230d099663f"}, - {file = "SQLAlchemy-1.4.45-cp310-cp310-win_amd64.whl", hash = "sha256:16ad798fc121cad5ea019eb2297127b08c54e1aa95fe17b3fea9fdbc5c34fe62"}, - {file = "SQLAlchemy-1.4.45-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099efef0de9fbda4c2d7cb129e4e7f812007901942259d4e6c6e19bd69de1088"}, - {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a29d02c9e6f6b105580c5ed7afb722b97bc2e2fdb85e1d45d7ddd8440cfbca"}, - {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc10423b59d6d032d6dff0bb42aa06dc6a8824eb6029d70c7d1b6981a2e7f4d8"}, - {file = "SQLAlchemy-1.4.45-cp311-cp311-win32.whl", hash = "sha256:1a92685db3b0682776a5abcb5f9e9addb3d7d9a6d841a452a17ec2d8d457bea7"}, - {file = "SQLAlchemy-1.4.45-cp311-cp311-win_amd64.whl", hash = "sha256:db3ccbce4a861bf4338b254f95916fc68dd8b7aa50eea838ecdaf3a52810e9c0"}, - {file = "SQLAlchemy-1.4.45.tar.gz", hash = "sha256:fd69850860093a3f69fefe0ab56d041edfdfe18510b53d9a2eaecba2f15fa795"}, -] - -[[package]] -name = "tinytag" -version = "1.9.0" -requires_python = ">=2.7" -summary = "Read music meta data and length of MP3, OGG, OPUS, MP4, M4A, FLAC, WMA and Wave files" -files = [ - {file = "tinytag-1.9.0.tar.gz", hash = "sha256:f8d71110e1e680a33d99202e00a5a698481d25d20173b81ba3e863423979e014"}, -] - -[[package]] -name = "tmdbv3api" -version = "1.9.0" -summary = "A lightweight Python library for The Movie Database (TMDb) API." 
-dependencies = [ - "requests", -] -files = [ - {file = "tmdbv3api-1.9.0-py3-none-any.whl", hash = "sha256:2bcd8c6e8902397860715a71045f200ecc3ee06804ecf786cb4c1e09b2deeba8"}, - {file = "tmdbv3api-1.9.0.tar.gz", hash = "sha256:504c5da6b99c4516ff160a01576112d097f209c0534f943c15c4b56cbd92c33b"}, -] - -[[package]] -name = "types-requests" -version = "2.31.0.2" -summary = "Typing stubs for requests" -dependencies = [ - "types-urllib3", -] -files = [ - {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, - {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, -] - -[[package]] -name = "types-urllib3" -version = "1.26.25.14" -summary = "Typing stubs for urllib3" -files = [ - {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, - {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, -] - -[[package]] -name = "typing-extensions" -version = "4.7.1" -requires_python = ">=3.7" -summary = "Backported and Experimental Type Hints for Python 3.7+" -files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, -] - -[[package]] -name = "unidecode" -version = "1.3.6" -requires_python = ">=3.5" -summary = "ASCII transliterations of Unicode text" -files = [ - {file = "Unidecode-1.3.6-py3-none-any.whl", hash = "sha256:547d7c479e4f377b430dd91ac1275d593308dce0fc464fb2ab7d41f82ec653be"}, - {file = "Unidecode-1.3.6.tar.gz", hash = "sha256:fed09cf0be8cf415b391642c2a5addfc72194407caee4f98719e40ec2a72b830"}, -] - -[[package]] -name = "urllib3" -version = "2.0.4" -requires_python = ">=3.7" -summary = "HTTP library with thread-safe connection pooling, file post, and more." -files = [ - {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, - {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, -] - -[[package]] -name = "werkzeug" -version = "2.2.2" -requires_python = ">=3.7" -summary = "The comprehensive WSGI web application library." -dependencies = [ - "MarkupSafe>=2.1.1", -] -files = [ - {file = "Werkzeug-2.2.2-py3-none-any.whl", hash = "sha256:f979ab81f58d7318e064e99c4506445d60135ac5cd2e177a2de0089bfd4c9bd5"}, - {file = "Werkzeug-2.2.2.tar.gz", hash = "sha256:7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f"}, -] +# This file is @generated by PDM. +# It is not intended for manual editing. + +[metadata] +groups = ["default", "lint"] +cross_platform = true +static_urls = false +lock_version = "4.3" +content_hash = "sha256:c095f0b622aa05b9bbfe0b9c3aa482ee940dfd28d074dbf719922d18b9637422" + +[[package]] +name = "alembic" +version = "1.11.2" +requires_python = ">=3.7" +summary = "A database migration tool for SQLAlchemy." 
+dependencies = [ + "Mako", + "SQLAlchemy>=1.3.0", + "typing-extensions>=4", +] +files = [ + {file = "alembic-1.11.2-py3-none-any.whl", hash = "sha256:7981ab0c4fad4fe1be0cf183aae17689fe394ff874fd2464adb774396faf0796"}, + {file = "alembic-1.11.2.tar.gz", hash = "sha256:678f662130dc540dac12de0ea73de9f89caea9dbea138f60ef6263149bf84657"}, +] + +[[package]] +name = "argparse" +version = "1.4.0" +summary = "Python command-line parsing library" +files = [ + {file = "argparse-1.4.0-py2.py3-none-any.whl", hash = "sha256:c31647edb69fd3d465a847ea3157d37bed1f95f19760b11a47aa91c04b666314"}, + {file = "argparse-1.4.0.tar.gz", hash = "sha256:62b089a55be1d8949cd2bc7e0df0bddb9e028faefc8c32038cc84862aefdd6e4"}, +] + +[[package]] +name = "babelfish" +version = "0.6.0" +requires_python = ">=3.6,<4.0" +summary = "A module to work with countries and languages" +files = [ + {file = "babelfish-0.6.0-py3-none-any.whl", hash = "sha256:268f1c6279f2a04a66837972e8a9f3dcc68e16f1201eec57d2a4b828a8b41b11"}, + {file = "babelfish-0.6.0.tar.gz", hash = "sha256:2dadfadd1b205ca5fa5dc9fa637f5b7933160a0418684c7c46a7a664033208a2"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +requires_python = ">=3.6.0" +summary = "Screen-scraping library" +dependencies = [ + "soupsieve>1.2", +] +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] + +[[package]] +name = "black" +version = "23.7.0" +requires_python = ">=3.8" +summary = "The uncompromising code formatter." +dependencies = [ + "click>=8.0.0", + "mypy-extensions>=0.4.3", + "packaging>=22.0", + "pathspec>=0.9.0", + "platformdirs>=2", + "tomli>=1.1.0; python_version < \"3.11\"", +] +files = [ + {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, + {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, + {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, + {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, + {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, + {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, + {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, +] + +[[package]] +name = "certifi" +version = "2023.7.22" +requires_python = ">=3.6" +summary = "Python package for providing Mozilla's CA Bundle." +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.2.0" +requires_python = ">=3.7.0" +summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+files = [ + {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, + {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +] + +[[package]] +name = "click" +version = "8.1.6" +requires_python = ">=3.7" +summary = "Composable command line interface toolkit" +dependencies = [ + "colorama; platform_system == \"Windows\"", +] +files = [ + {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, + {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, +] + +[[package]] +name = "colorama" +version = "0.4.5" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +summary = "Cross-platform colored terminal text." 
+files = [ + {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, + {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, +] + +[[package]] +name = "deep-translator" +version = "1.10.1" +requires_python = ">=3.7,<4.0" +summary = "A flexible free and unlimited python tool to translate between different languages in a simple way using multiple translators" +dependencies = [ + "beautifulsoup4<5.0.0,>=4.9.1", + "requests<3.0.0,>=2.23.0", +] +files = [ + {file = "deep_translator-1.10.1-py3-none-any.whl", hash = "sha256:8581e93c4d33746cd6e662607fefb26c457eae9b197de0648e3f25f324ef6534"}, + {file = "deep_translator-1.10.1.tar.gz", hash = "sha256:e99438dab70e3bebcda932e3f5e86fd3d32b43f87d66ed9f8b6816db146f1d9f"}, +] + +[[package]] +name = "deezer-python" +version = "6.1.0" +requires_python = ">=3.8,<4.0" +summary = "A friendly wrapper library for the Deezer API" +dependencies = [ + "requests>=2.18", +] +files = [ + {file = "deezer_python-6.1.0-py3-none-any.whl", hash = "sha256:ea15381cc05f978654c73097c9b1e40a5ae19ce6d5332303a2c4c5e047f73085"}, + {file = "deezer_python-6.1.0.tar.gz", hash = "sha256:84930fda42a4b91ffa287296fb92669b81c73bd5d69437801f2acc6575ca812e"}, +] + +[[package]] +name = "ebooklib" +version = "0.18" +summary = "Ebook library which can handle EPUB2/EPUB3 and Kindle format" +dependencies = [ + "lxml", + "six", +] +files = [ + {file = "EbookLib-0.18.tar.gz", hash = "sha256:38562643a7bc94d9bf56e9930b4927e4e93b5d1d0917f697a6454db5a1c1a533"}, +] + +[[package]] +name = "flask" +version = "2.2.2" +requires_python = ">=3.7" +summary = "A simple framework for building complex web applications." +dependencies = [ + "Jinja2>=3.0", + "Werkzeug>=2.2.2", + "click>=8.0", + "itsdangerous>=2.0", +] +files = [ + {file = "Flask-2.2.2-py3-none-any.whl", hash = "sha256:b9c46cc36662a7949f34b52d8ec7bb59c0d74ba08ba6cb9ce9adc1d8676d9526"}, + {file = "Flask-2.2.2.tar.gz", hash = "sha256:642c450d19c4ad482f96729bd2a8f6d32554aa1e231f4f6b4e7e5264b16cca2b"}, +] + +[[package]] +name = "flask-cors" +version = "3.0.10" +summary = "A Flask extension adding a decorator for CORS support" +dependencies = [ + "Flask>=0.9", + "Six", +] +files = [ + {file = "Flask-Cors-3.0.10.tar.gz", hash = "sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de"}, + {file = "Flask_Cors-3.0.10-py2.py3-none-any.whl", hash = "sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438"}, +] + +[[package]] +name = "flask-login" +version = "0.6.2" +requires_python = ">=3.7" +summary = "User authentication and session management for Flask." +dependencies = [ + "Flask>=1.0.4", + "Werkzeug>=1.0.1", +] +files = [ + {file = "Flask-Login-0.6.2.tar.gz", hash = "sha256:c0a7baa9fdc448cdd3dd6f0939df72eec5177b2f7abe6cb82fc934d29caac9c3"}, + {file = "Flask_Login-0.6.2-py3-none-any.whl", hash = "sha256:1ef79843f5eddd0f143c2cd994c1b05ac83c0401dc6234c143495af9a939613f"}, +] + +[[package]] +name = "flask-migrate" +version = "4.0.4" +requires_python = ">=3.6" +summary = "SQLAlchemy database migrations for Flask applications using Alembic." 
+dependencies = [ + "Flask-SQLAlchemy>=1.0", + "Flask>=0.9", + "alembic>=1.9.0", +] +files = [ + {file = "Flask-Migrate-4.0.4.tar.gz", hash = "sha256:73293d40b10ac17736e715b377e7b7bde474cb8105165d77474df4c3619b10b3"}, + {file = "Flask_Migrate-4.0.4-py3-none-any.whl", hash = "sha256:77580f27ab39bc68be4906a43c56d7674b45075bc4f883b1d0b985db5164d58f"}, +] + +[[package]] +name = "flask-sqlalchemy" +version = "3.0.3" +requires_python = ">=3.7" +summary = "Add SQLAlchemy support to your Flask application." +dependencies = [ + "Flask>=2.2", + "SQLAlchemy>=1.4.18", +] +files = [ + {file = "Flask-SQLAlchemy-3.0.3.tar.gz", hash = "sha256:2764335f3c9d7ebdc9ed6044afaf98aae9fa50d7a074cef55dde307ec95903ec"}, + {file = "Flask_SQLAlchemy-3.0.3-py3-none-any.whl", hash = "sha256:add5750b2f9cd10512995261ee2aa23fab85bd5626061aa3c564b33bb4aa780a"}, +] + +[[package]] +name = "get-video-properties" +version = "0.1.1" +summary = "Get video properties" +files = [ + {file = "get_video_properties-0.1.1-py3-none-any.whl", hash = "sha256:04d4f478a5211917e2a7e87ddfcb1c17734cddf8374494c3993bf825b7ad4192"}, +] + +[[package]] +name = "gitdb" +version = "4.0.10" +requires_python = ">=3.7" +summary = "Git Object Database" +dependencies = [ + "smmap<6,>=3.0.1", +] +files = [ + {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, + {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, +] + +[[package]] +name = "gitpython" +version = "3.1.31" +requires_python = ">=3.7" +summary = "GitPython is a Python library used to interact with Git repositories" +dependencies = [ + "gitdb<5,>=4.0.1", +] +files = [ + {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, + {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, +] + +[[package]] +name = "gputil" +version = "1.4.0" +summary = "GPUtil is a Python module for getting the GPU status from NVIDA GPUs using nvidia-smi." 
+files = [ + {file = "GPUtil-1.4.0.tar.gz", hash = "sha256:099e52c65e512cdfa8c8763fca67f5a5c2afb63469602d5dcb4d296b3661efb9"}, +] + +[[package]] +name = "greenlet" +version = "2.0.2" +requires_python = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +summary = "Lightweight in-process concurrent programming" +files = [ + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[[package]] +name = "guessit" +version = "3.7.1" +summary = "GuessIt - a library for guessing information from video filenames." 
+dependencies = [ + "babelfish>=0.6.0", + "python-dateutil", + "rebulk>=3.2.0", +] +files = [ + {file = "guessit-3.7.1-py3-none-any.whl", hash = "sha256:c3be280ee8ec581a45ca6a654a92e317bf89567fdc55e7167452226f4f5b8b38"}, + {file = "guessit-3.7.1.tar.gz", hash = "sha256:2c18d982ee6db30db5d59557add0324a2b49bf3940a752947510632a2b58a3c1"}, +] + +[[package]] +name = "idna" +version = "3.4" +requires_python = ">=3.5" +summary = "Internationalized Domain Names in Applications (IDNA)" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "imagehash" +version = "4.3.1" +summary = "Image Hashing library" +dependencies = [ + "PyWavelets", + "numpy", + "pillow", + "scipy", +] +files = [ + {file = "ImageHash-4.3.1-py2.py3-none-any.whl", hash = "sha256:5ad9a5cde14fe255745a8245677293ac0d67f09c330986a351f34b614ba62fb5"}, + {file = "ImageHash-4.3.1.tar.gz", hash = "sha256:7038d1b7f9e0585beb3dd8c0a956f02b95a346c0b5f24a9e8cc03ebadaf0aa70"}, +] + +[[package]] +name = "itsdangerous" +version = "2.1.2" +requires_python = ">=3.7" +summary = "Safely pass data to untrusted environments and back." +files = [ + {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, + {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, +] + +[[package]] +name = "jinja2" +version = "3.1.2" +requires_python = ">=3.7" +summary = "A very fast and expressive template engine." +dependencies = [ + "MarkupSafe>=2.0", +] +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[[package]] +name = "levenshtein" +version = "0.20.9" +requires_python = ">=3.6" +summary = "Python extension for computing string edit distances and similarities." 
+dependencies = [ + "rapidfuzz<3.0.0,>=2.3.0", +] +files = [ + {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:105c239ec786750cd5136991c58196b440cc39b6acf3ec8227f6562c9a94e4b9"}, + {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f7728bea7fe6dc55ceecde0dcda4287e74fe3b6733ad42530f46aaa8d2f81d0"}, + {file = "Levenshtein-0.20.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc7eca755c13c92814c8cce8175524cf764ce38f39228b602f59eac58cfdc51a"}, + {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8a552e79d053dc1324fb90d342447fd4e15736f4cbc5363b6fbd5577f53dce9"}, + {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5474b2681ee0b7944fb1e7fe281cd44e2dfe75b03ba4558dca49c96fa0861b62"}, + {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:56e132c203b0dd8fc72a33e791c39ad0d5a25bcf24b130a1e202abbf489a3e75"}, + {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3badc94708ac05b405e795fde58a53272b90a9ee6099ecd54a345658b7b812e1"}, + {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48b9b3ae095b14dad7bc4bd219c7cd9113a7aa123a033337c85b00fe2ed565d3"}, + {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0d3a1f7328c91caeb1f857ddd2787e3f19d60cc2c688339d249ca8841da61454"}, + {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef67c50428c99caf67d31bd209da21d9378da5f0cc3ad4f7bafb6caa78aee6f2"}, + {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:47f6d1592c0891f7355e38a302becd233336ca2f55f9a8be3a8635f946a6784f"}, + {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2891019740e874f05e0349e9f27b6af8ad837b1612f42e9c90c296d54d1404fd"}, + {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c554704eec4f4ba742febdcc79a85491f8f9a1d493cb103bb2af18536d6cf122"}, + {file = "Levenshtein-0.20.9-cp310-cp310-win32.whl", hash = "sha256:7628e356b3f9c78ad7272c3b9137f0641a1368849e749ff6f2c8fe372795806b"}, + {file = "Levenshtein-0.20.9-cp310-cp310-win_amd64.whl", hash = "sha256:ba2bafe3511194a37044cae4e7d328cca70657933052691c37eba2ca428a379d"}, + {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7605a94145198d19fdaaa7e29c0f8a56ad719b12386f3ae8cd8ed4cb9fa6c2e4"}, + {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:29db4dabfad2ddf33c7986eb6fd525c7587cca4c4d9e187365cff0a5281f5a35"}, + {file = "Levenshtein-0.20.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:965336c1772a4fc5fb2686a2a0bfaf3455dced96f19f50f278da8bc139076d31"}, + {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67235753035ac898d6475c0b29540521018db2e0027a3c1deb9aa0af0a84fd74"}, + {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:120dca58136aee3d8c7b190e30db7b6a6eb9579ea5712df84ad076a389801743"}, + {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6496ea66a6f755e48c0d82f1eee396d16edcd5592d4b3677d26fa789a636a728"}, + {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c0af20327acc2c904d11611cb3a0d8d17f80c279a12e0b84189eafc35297186d"}, + {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d2f891ef53afbab6cf2eeb92ff13151884d17dc80a2d6d3c7ae74d7738b772"}, + {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2ab9c72380582bf4745d1c5b055b1df0c85f7a980a04bd7603a855dd91478c0f"}, + {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6de13be3eb5ac48053fb1635a7b4daa936b9114ad4b264942e9eb709fcaa41dd"}, + {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9fc296860588251d8d72b4f4637cca4eef7351e042a7a23d44e6385aef1e160"}, + {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:35777b20fe35858248c22da37984469e6dd1278f55d17c53378312853d5d683d"}, + {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b9e0642ddb4c431f77c38cec9edbd0317e26c3f37d072ccf281ab58926dce69"}, + {file = "Levenshtein-0.20.9-cp311-cp311-win32.whl", hash = "sha256:f88ec322d86d3cc9d3936dbf6b421ad813950c2658599d48ac4ede59f2a6047e"}, + {file = "Levenshtein-0.20.9-cp311-cp311-win_amd64.whl", hash = "sha256:2907a6888455f9915d5b656f5d058f63eaf6063b2c7f0f1ff6bc05706ae5bc39"}, + {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f674cc75f127692525563155e500a3fa16aaf24dafd33a9bcda46e2979f793a1"}, + {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a34e3fd21acb31fcd29a0c8353dca74dfbb59957210a6f142505907a9dff3d59"}, + {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0ddddf2beafd1a2e17a87f80be562a7f7478e6098ccfc15de4c879972dfa2f9"}, + {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9649af1a896a4a7fc7f6f1fd093e8a92f463297f56c7bd0f8d7d16dfabeb236d"}, + {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d7bd7f25336849027fbe5ed32b6ffd404436727d78a014e348dcd17347c73fd8"}, + {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0371d996ae81089296f42b6e886c7bf138d1cb0f002b0c724a9e5d689b29b5a0"}, + {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e00e2fda9f225b5f4537647f6195cf220d468532739d3390eaf082b1d76c87"}, + {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1600f5ebe2f2aebf13e88cf488ec2e5ce25f7a42b5846335018693baf4ea63bd"}, + {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bcd59fcf06aaedda98da185ec289dc2c2c9922ce789f6a9c101709d4a22cac9"}, + {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:1549e307028fa5c3a8cf28ae8bcb1f6072df2abf7f36b9d7adf7fd60690fe372"}, + {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:795f2e95d09a33c66c73cd49be3ee632fb4b8c41be72c0cb8df29a329ce7d111"}, + {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:726bfb361d3b6786bea31392752f0ffcca568db7dc3f1e274f1b529489b8ad05"}, + {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0e0fd315132786375de532355fa06b2f11c4b4af5784b7e064dc54b6ee0c3281"}, + {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0674bc0549d5ea9edb934b3b03a160a116cc410feb5739a51f9c4f618ee674e3"}, + {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1ef8f3ecdfca5d6f0538226338d58617270439a1cc9b6cacb30a388984bb1608"}, + {file = "Levenshtein-0.20.9.tar.gz", hash = "sha256:70a8ad5e28bb76d87da1eb3f31de940836596547d6d01317c2289f5b7cd0b0ea"}, +] + +[[package]] +name = "logging" +version = "0.4.9.6" +summary = "A logging module for Python" +files = [ + {file = "logging-0.4.9.6.tar.gz", hash = "sha256:26f6b50773f085042d301085bd1bf5d9f3735704db9f37c1ce6d8b85c38f2417"}, +] + +[[package]] +name = "lxml" +version = "4.9.3" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" +summary = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." +files = [ + {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, + {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, + {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, + {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, + {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = 
"sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, + {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, + {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, + {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, + {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, + {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, +] + +[[package]] +name = "mako" +version = "1.2.4" 
+requires_python = ">=3.7" +summary = "A super-fast templating language that borrows the best ideas from the existing templating languages." +dependencies = [ + "MarkupSafe>=0.9.2", +] +files = [ + {file = "Mako-1.2.4-py3-none-any.whl", hash = "sha256:c97c79c018b9165ac9922ae4f32da095ffd3c4e6872b45eded42926deea46818"}, + {file = "Mako-1.2.4.tar.gz", hash = "sha256:d60a3903dc3bb01a18ad6a89cdbe2e4eadc69c0bc8ef1e3773ba53d44c3f7a34"}, +] + +[[package]] +name = "markupsafe" +version = "2.1.3" +requires_python = ">=3.7" +summary = "Safely add untrusted strings to HTML/XML markup." +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = 
"sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +requires_python = ">=3.5" +summary = "Type system extensions for programs checked with the mypy type checker." +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "natsort" +version = "8.4.0" +requires_python = ">=3.7" +summary = "Simple yet flexible natural sorting in Python." +files = [ + {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, + {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, +] + +[[package]] +name = "numpy" +version = "1.25.2" +requires_python = ">=3.9" +summary = "Fundamental package for array computing in Python" +files = [ + {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"}, + {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"}, + {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"}, + {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"}, + {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"}, + {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"}, + {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"}, + {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"}, + {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"}, +] + +[[package]] +name = "opencv-python" +version = "4.5.5.64" +requires_python = ">=3.6" +summary = "Wrapper package for OpenCV python bindings." +dependencies = [ + "numpy>=1.14.5; python_version >= \"3.7\"", + "numpy>=1.17.3; python_version >= \"3.8\"", + "numpy>=1.19.3; python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\"", + "numpy>=1.19.3; python_version >= \"3.9\"", + "numpy>=1.21.2; python_version >= \"3.10\"", + "numpy>=1.21.2; python_version >= \"3.6\" and platform_system == \"Darwin\" and platform_machine == \"arm64\"", +] +files = [ + {file = "opencv-python-4.5.5.64.tar.gz", hash = "sha256:f65de0446a330c3b773cd04ba10345d8ce1b15dcac3f49770204e37602d0b3f7"}, + {file = "opencv_python-4.5.5.64-cp36-abi3-macosx_10_15_x86_64.whl", hash = "sha256:a512a0c59b6fec0fac3844b2f47d6ecb1a9d18d235e6c5491ce8dbbe0663eae8"}, + {file = "opencv_python-4.5.5.64-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6138b6903910e384067d001763d40f97656875487381aed32993b076f44375"}, + {file = "opencv_python-4.5.5.64-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b293ced62f4360d9f11cf72ae7e9df95320ff7bf5b834d87546f844e838c0c35"}, + {file = "opencv_python-4.5.5.64-cp36-abi3-win32.whl", hash = "sha256:6247e584813c00c3b9ed69a795da40d2c153dc923d0182e957e1c2f00a554ac2"}, + {file = "opencv_python-4.5.5.64-cp36-abi3-win_amd64.whl", hash = "sha256:408d5332550287aa797fd06bef47b2dfed163c6787668cc82ef9123a9484b56a"}, + {file = "opencv_python-4.5.5.64-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:7787bb017ae93d5f9bb1b817ac8e13e45dd193743cb648498fcab21d00cf20a3"}, +] + +[[package]] +name = "overrides" +version = "7.4.0" +requires_python = ">=3.6" +summary = "A decorator to automatically detect mismatch when overriding a method." +files = [ + {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, + {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, +] + +[[package]] +name = "packaging" +version = "23.1" +requires_python = ">=3.7" +summary = "Core utilities for Python packages" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "pathspec" +version = "0.11.2" +requires_python = ">=3.7" +summary = "Utility library for gitignore style pattern matching of file paths." 
+files = [ + {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, + {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, +] + +[[package]] +name = "pillow" +version = "9.5.0" +requires_python = ">=3.7" +summary = "Python Imaging Library (Fork)" +files = [ + {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, + {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, + {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, + {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, + {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, + {file 
= "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, + {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, + {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, + {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, + {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, +] + +[[package]] +name = "platformdirs" +version = "3.10.0" +requires_python = ">=3.7" +summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+files = [ + {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, + {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, +] + +[[package]] +name = "pyarr" +version = "5.0.0" +requires_python = ">=3.9,<4.0" +summary = "Synchronous Sonarr, Radarr, Lidarr and Readarr API's for Python" +dependencies = [ + "overrides<8.0.0,>=7.3.1", + "requests<3.0.0,>=2.28.2", + "types-requests<3.0.0.0,>=2.28.11.17", +] +files = [ + {file = "pyarr-5.0.0-py3-none-any.whl", hash = "sha256:6a961ee6789afa8962c0edd99a87093c7a7421d7419e1c752fa03a61e6233a51"}, + {file = "pyarr-5.0.0.tar.gz", hash = "sha256:7b115b5fd81a3715f75b5ecdf42943d21eedeaea6f02514fa22ab5445f4650d2"}, +] + +[[package]] +name = "pycountry" +version = "22.3.5" +requires_python = ">=3.6, <4" +summary = "ISO country, subdivision, language, currency and script definitions and their translations" +dependencies = [ + "setuptools", +] +files = [ + {file = "pycountry-22.3.5.tar.gz", hash = "sha256:b2163a246c585894d808f18783e19137cb70a0c18fb36748dc01fc6f109c1646"}, +] + +[[package]] +name = "pymupdf" +version = "1.22.5" +requires_python = ">=3.7" +summary = "Python bindings for the PDF toolkit and renderer MuPDF" +files = [ + {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:640b8e4cb116dd87a3c854e49808a4f63625e663a7bc5b1efc971db5b4775367"}, + {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:17efbbf0e2d99d24cfc302fac512928eb294f10b7b67d597d04dafd012812e4e"}, + {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc9b9bf0f2beea3911750d2d66247608be8cbad33b7a050cacec9e4c105a1ca"}, + {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7734a32a91eea4b502b8f9d2915cdba0a372226e14fb983876d763110dcefef"}, + {file = "PyMuPDF-1.22.5-cp310-cp310-win32.whl", hash = "sha256:c2fd70ca9961f7871810dce1b7d0a42a69eb8ff2d786621123952bd505a6867e"}, + {file = "PyMuPDF-1.22.5-cp310-cp310-win_amd64.whl", hash = "sha256:add310c96df6933cfb4ce3821c9c7b5c133e8aa609a4c9416e1c7af546163488"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:017aaba511526facfc928e9d95d2c10d28a2821b05b9039bf422031a7da8584e"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe5e44a14864d921fb96669a82f9635846806176f77f1d73c61feb84ebf4d84"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e74d766f79e41e10c51865233042ab2cc4612ca7942812dca0603f4d0f8f73d"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8175452fcc99a0af6429d8acd87682a3a70c5879d73532c7327f71ce508a35"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-win32.whl", hash = "sha256:42f59f4999d7f8b35c850050bd965e98c081a7d9b92d5f9dcf30203b30d06876"}, + {file = "PyMuPDF-1.22.5-cp311-cp311-win_amd64.whl", hash = "sha256:3d71c47aa14b73f2df7d03be8c547a05df6c6898d8c63a0f752b26f206eefd3c"}, + {file = "PyMuPDF-1.22.5.tar.gz", hash = "sha256:5ec8d5106752297529d0d68d46cfc4ce99914aabd99be843f1599a1842d63fe9"}, +] + +[[package]] +name = "pypdf2" +version = "3.0.1" +requires_python = ">=3.6" +summary = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" +files = [ + {file = "PyPDF2-3.0.1.tar.gz", hash = 
"sha256:a74408f69ba6271f71b9352ef4ed03dc53a31aa404d29b5d31f53bfecfee1440"}, + {file = "pypdf2-3.0.1-py3-none-any.whl", hash = "sha256:d16e4205cfee272fbdc0568b68d82be796540b1537508cef59388f839c191928"}, +] + +[[package]] +name = "pypresence" +version = "4.2.1" +requires_python = ">=3.5" +summary = "Discord RPC client written in Python" +files = [ + {file = "pypresence-4.2.1-py2.py3-none-any.whl", hash = "sha256:12197b5f51c21e3e555b17f85d3e55023f4ad83b6fff72cd6387659ffd484a02"}, + {file = "pypresence-4.2.1.tar.gz", hash = "sha256:691daf98c8189fd216d988ebfc67779e0f664211512d9843f37ab0d51d4de066"}, +] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +summary = "Extensions to the standard Python datetime module" +dependencies = [ + "six>=1.5", +] +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[[package]] +name = "python-levenshtein" +version = "0.20.9" +requires_python = ">=3.6" +summary = "Python extension for computing string edit distances and similarities." +dependencies = [ + "Levenshtein==0.20.9", +] +files = [ + {file = "python-Levenshtein-0.20.9.tar.gz", hash = "sha256:4c507b1e26de29374153982fa477cea741edf095d892773343b4961beacac834"}, + {file = "python_Levenshtein-0.20.9-py3-none-any.whl", hash = "sha256:2a6f8c97ba554d7399e0b450e1fce5d90d6354b1c1762e419671de27f25736c5"}, +] + +[[package]] +name = "pywavelets" +version = "1.4.1" +requires_python = ">=3.8" +summary = "PyWavelets, wavelet transform module" +dependencies = [ + "numpy>=1.17.3", +] +files = [ + {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, + {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, + {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, + {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, + {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, + {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, + {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, + {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, + {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, + {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, + {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, + {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, + {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, +] + +[[package]] +name = "rapidfuzz" +version = "2.15.1" +requires_python = ">=3.7" +summary = "rapid fuzzy string matching" +files = [ + {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fc0bc259ebe3b93e7ce9df50b3d00e7345335d35acbd735163b7c4b1957074d3"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d59fb3a410d253f50099d7063855c2b95df1ef20ad93ea3a6b84115590899f25"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c525a3da17b6d79d61613096c8683da86e3573e807dfaecf422eea09e82b5ba6"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4deae6a918ecc260d0c4612257be8ba321d8e913ccb43155403842758c46fbe"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2577463d10811386e704a3ab58b903eb4e2a31b24dfd9886d789b0084d614b01"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f67d5f56aa48c0da9de4ab81bffb310683cf7815f05ea38e5aa64f3ba4368339"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7927722ff43690e52b3145b5bd3089151d841d350c6f8378c3cfac91f67573a"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6534afc787e32c4104f65cdeb55f6abe4d803a2d0553221d00ef9ce12788dcde"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d0ae6ec79a1931929bb9dd57bc173eb5ba4c7197461bf69e3a34b6dd314feed2"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:be7ccc45c4d1a7dfb595f260e8022a90c6cb380c2a346ee5aae93f85c96d362b"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8ba013500a2b68c64b2aecc5fb56a2dad6c2872cf545a0308fd044827b6e5f6a"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4d9f7d10065f657f960b48699e7dddfce14ab91af4bab37a215f0722daf0d716"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7e24a1b802cea04160b3fccd75d2d0905065783ebc9de157d83c14fb9e1c6ce2"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-win32.whl", hash = "sha256:dffdf03499e0a5b3442951bb82b556333b069e0661e80568752786c79c5b32de"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d150d90a7c6caae7962f29f857a4e61d42038cfd82c9df38508daf30c648ae7"}, + {file = "rapidfuzz-2.15.1-cp310-cp310-win_arm64.whl", hash = "sha256:87c30e9184998ff6eb0fa9221f94282ce7c908fd0da96a1ef66ecadfaaa4cdb7"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6986413cb37035eb796e32f049cbc8c13d8630a4ac1e0484e3e268bb3662bd1b"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a72f26e010d4774b676f36e43c0fc8a2c26659efef4b3be3fd7714d3491e9957"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b5cd54c98a387cca111b3b784fc97a4f141244bbc28a92d4bde53f164464112e"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da7fac7c3da39f93e6b2ebe386ed0ffe1cefec91509b91857f6e1204509e931f"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f976e76ac72f650790b3a5402431612175b2ac0363179446285cb3c901136ca9"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:abde47e1595902a490ed14d4338d21c3509156abb2042a99e6da51f928e0c117"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca8f1747007a3ce919739a60fa95c5325f7667cccf6f1c1ef18ae799af119f5e"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c35da09ab9797b020d0d4f07a66871dfc70ea6566363811090353ea971748b5a"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3a769ca7580686a66046b77df33851b3c2d796dc1eb60c269b68f690f3e1b65"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d50622efefdb03a640a51a6123748cd151d305c1f0431af762e833d6ffef71f0"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b7461b0a7651d68bc23f0896bffceea40f62887e5ab8397bf7caa883592ef5cb"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:074ee9e17912e025c72a5780ee4c7c413ea35cd26449719cc399b852d4e42533"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7025fb105a11f503943f17718cdb8241ea3bb4d812c710c609e69bead40e2ff0"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-win32.whl", hash = "sha256:2084d36b95139413cef25e9487257a1cc892b93bd1481acd2a9656f7a1d9930c"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:5a738fcd24e34bce4b19126b92fdae15482d6d3a90bd687fd3d24ce9d28ce82d"}, + {file = "rapidfuzz-2.15.1-cp311-cp311-win_arm64.whl", hash = "sha256:dc3cafa68cfa54638632bdcadf9aab89a3d182b4a3f04d2cad7585ed58ea8731"}, + {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b89d1126be65c85763d56e3b47d75f1a9b7c5529857b4d572079b9a636eaa8a7"}, + {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7460e91168229768be882ea365ba0ac7da43e57f9416e2cfadc396a7df3c2"}, + {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c33c03e7092642c38f8a15ca2d8fc38da366f2526ec3b46adf19d5c7aa48ba"}, + {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040faca2e26d9dab5541b45ce72b3f6c0e36786234703fc2ac8c6f53bb576743"}, + {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:6e2a3b23e1e9aa13474b3c710bba770d0dcc34d517d3dd6f97435a32873e3f28"}, + {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e597b9dfd6dd180982684840975c458c50d447e46928efe3e0120e4ec6f6686"}, + {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d14752c9dd2036c5f36ebe8db5f027275fa7d6b3ec6484158f83efb674bab84e"}, + {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558224b6fc6124d13fa32d57876f626a7d6188ba2a97cbaea33a6ee38a867e31"}, + {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c89cfa88dc16fd8c9bcc0c7f0b0073f7ef1e27cceb246c9f5a3f7004fa97c4d"}, + {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:509c5b631cd64df69f0f011893983eb15b8be087a55bad72f3d616b6ae6a0f96"}, + {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:0f73a04135a03a6e40393ecd5d46a7a1049d353fc5c24b82849830d09817991f"}, + {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c99d53138a2dfe8ada67cb2855719f934af2733d726fbf73247844ce4dd6dd5"}, + {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f01fa757f0fb332a1f045168d29b0d005de6c39ee5ce5d6c51f2563bb53c601b"}, + {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60368e1add6e550faae65614844c43f8a96e37bf99404643b648bf2dba92c0fb"}, + {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785744f1270828cc632c5a3660409dee9bcaac6931a081bae57542c93e4d46c4"}, + {file = "rapidfuzz-2.15.1.tar.gz", hash = "sha256:d62137c2ca37aea90a11003ad7dc109c8f1739bfbe5a9a217f3cdb07d7ac00f6"}, +] + +[[package]] +name = "rarfile" +version = "4.0" +summary = "RAR archive reader for Python" +files = [ + {file = "rarfile-4.0-py3-none-any.whl", hash = "sha256:1094869119012f95c31a6f22cc3a9edbdca61861b805241116adbe2d737b68f8"}, + {file = "rarfile-4.0.tar.gz", hash = "sha256:67548769229c5bda0827c1663dce3f54644f9dbfba4ae86d4da2b2afd3e602a1"}, +] + +[[package]] +name = "rebulk" +version = "3.2.0" +summary = "Rebulk - Define simple search patterns in bulk to perform advanced matching on any string." +files = [ + {file = "rebulk-3.2.0-py3-none-any.whl", hash = "sha256:6bc31ae4b37200623c5827d2f539f9ec3e52b50431322dad8154642a39b0a53e"}, + {file = "rebulk-3.2.0.tar.gz", hash = "sha256:0d30bf80fca00fa9c697185ac475daac9bde5f646ce3338c9ff5d5dc1ebdfebc"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +requires_python = ">=3.7" +summary = "Python HTTP for Humans." +dependencies = [ + "certifi>=2017.4.17", + "charset-normalizer<4,>=2", + "idna<4,>=2.5", + "urllib3<3,>=1.21.1", +] +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[[package]] +name = "scipy" +version = "1.9.3" +requires_python = ">=3.8" +summary = "Fundamental algorithms for scientific computing in Python" +dependencies = [ + "numpy<1.26.0,>=1.18.5", +] +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = 
"scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[[package]] +name = "setuptools" +version = "68.0.0" +requires_python = ">=3.7" +summary = "Easily download, build, install, upgrade, and uninstall Python packages" +files = [ + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, +] + +[[package]] +name = "six" +version = "1.16.0" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +summary = "Python 2 and 3 compatibility utilities" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.0" +requires_python = ">=3.6" +summary = "A pure Python implementation of a sliding window memory map manager" +files = [ + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, +] + +[[package]] +name = "soupsieve" +version = "2.4.1" +requires_python = ">=3.7" +summary = "A modern CSS selector implementation for Beautiful Soup." 
+files = [ + {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, + {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, +] + +[[package]] +name = "sqlalchemy" +version = "1.4.45" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +summary = "Database Abstraction Library" +dependencies = [ + "greenlet!=0.4.17; python_version >= \"3\" and (platform_machine == \"aarch64\" or (platform_machine == \"ppc64le\" or (platform_machine == \"x86_64\" or (platform_machine == \"amd64\" or (platform_machine == \"AMD64\" or (platform_machine == \"win32\" or platform_machine == \"WIN32\"))))))", +] +files = [ + {file = "SQLAlchemy-1.4.45-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:ca152ffc7f0aa069c95fba46165030267ec5e4bb0107aba45e5e9e86fe4d9363"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06055476d38ed7915eeed22b78580556d446d175c3574a01b9eb04d91f3a8b2e"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:081e2a2d75466353c738ca2ee71c0cfb08229b4f9909b5fa085f75c48d021471"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96821d806c0c90c68ce3f2ce6dd529c10e5d7587961f31dd5c30e3bfddc4545d"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-win32.whl", hash = "sha256:c8051bff4ce48cbc98f11e95ac46bfd1e36272401070c010248a3230d099663f"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-win_amd64.whl", hash = "sha256:16ad798fc121cad5ea019eb2297127b08c54e1aa95fe17b3fea9fdbc5c34fe62"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099efef0de9fbda4c2d7cb129e4e7f812007901942259d4e6c6e19bd69de1088"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a29d02c9e6f6b105580c5ed7afb722b97bc2e2fdb85e1d45d7ddd8440cfbca"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc10423b59d6d032d6dff0bb42aa06dc6a8824eb6029d70c7d1b6981a2e7f4d8"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-win32.whl", hash = "sha256:1a92685db3b0682776a5abcb5f9e9addb3d7d9a6d841a452a17ec2d8d457bea7"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-win_amd64.whl", hash = "sha256:db3ccbce4a861bf4338b254f95916fc68dd8b7aa50eea838ecdaf3a52810e9c0"}, + {file = "SQLAlchemy-1.4.45.tar.gz", hash = "sha256:fd69850860093a3f69fefe0ab56d041edfdfe18510b53d9a2eaecba2f15fa795"}, +] + +[[package]] +name = "tinytag" +version = "1.9.0" +requires_python = ">=2.7" +summary = "Read music meta data and length of MP3, OGG, OPUS, MP4, M4A, FLAC, WMA and Wave files" +files = [ + {file = "tinytag-1.9.0.tar.gz", hash = "sha256:f8d71110e1e680a33d99202e00a5a698481d25d20173b81ba3e863423979e014"}, +] + +[[package]] +name = "tmdbv3api" +version = "1.9.0" +summary = "A lightweight Python library for The Movie Database (TMDb) API." 
+dependencies = [ + "requests", +] +files = [ + {file = "tmdbv3api-1.9.0-py3-none-any.whl", hash = "sha256:2bcd8c6e8902397860715a71045f200ecc3ee06804ecf786cb4c1e09b2deeba8"}, + {file = "tmdbv3api-1.9.0.tar.gz", hash = "sha256:504c5da6b99c4516ff160a01576112d097f209c0534f943c15c4b56cbd92c33b"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +requires_python = ">=3.7" +summary = "A lil' TOML parser" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "types-requests" +version = "2.31.0.2" +summary = "Typing stubs for requests" +dependencies = [ + "types-urllib3", +] +files = [ + {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, + {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, +] + +[[package]] +name = "types-urllib3" +version = "1.26.25.14" +summary = "Typing stubs for urllib3" +files = [ + {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, + {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, +] + +[[package]] +name = "typing-extensions" +version = "4.7.1" +requires_python = ">=3.7" +summary = "Backported and Experimental Type Hints for Python 3.7+" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[[package]] +name = "unidecode" +version = "1.3.6" +requires_python = ">=3.5" +summary = "ASCII transliterations of Unicode text" +files = [ + {file = "Unidecode-1.3.6-py3-none-any.whl", hash = "sha256:547d7c479e4f377b430dd91ac1275d593308dce0fc464fb2ab7d41f82ec653be"}, + {file = "Unidecode-1.3.6.tar.gz", hash = "sha256:fed09cf0be8cf415b391642c2a5addfc72194407caee4f98719e40ec2a72b830"}, +] + +[[package]] +name = "urllib3" +version = "2.0.4" +requires_python = ">=3.7" +summary = "HTTP library with thread-safe connection pooling, file post, and more." +files = [ + {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, + {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, +] + +[[package]] +name = "werkzeug" +version = "2.2.2" +requires_python = ">=3.7" +summary = "The comprehensive WSGI web application library." 
+dependencies = [ + "MarkupSafe>=2.1.1", +] +files = [ + {file = "Werkzeug-2.2.2-py3-none-any.whl", hash = "sha256:f979ab81f58d7318e064e99c4506445d60135ac5cd2e177a2de0089bfd4c9bd5"}, + {file = "Werkzeug-2.2.2.tar.gz", hash = "sha256:7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f"}, +] diff --git a/pyproject.toml b/pyproject.toml index 71ea377..f9b583a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] -name = "chocolate_app" +name = "Chocolate" version = "7.0.0" -description = "Chocolate is a modern media manager written in Python" +description = "" authors = [ {name = "Imprevisible", email = "impr.visible@gmail.com"}, ] @@ -38,13 +38,14 @@ dependencies = [ "natsort>=8.4.0", "PyMuPdf>=1.22.5", ] -requires-python = ">=3.11" +requires-python = ">=3.10" readme = "README.md" -license = {text = "GPL3.0"} +license = {text = "GPL-3.0"} [build-system] requires = ["pdm-backend"] build-backend = "pdm.backend" + [tool.pdm.dev-dependencies] lint = [ "black>=23.7.0", diff --git a/src/chocolate_app/__init__.py b/src/chocolate_app/__init__.py index bc6d86b..871fe27 100644 --- a/src/chocolate_app/__init__.py +++ b/src/chocolate_app/__init__.py @@ -1,183 +1,190 @@ -import os -import configparser -import platform -import argparse -import logging -import pathlib -import shutil - -from flask import Flask -from flask_sqlalchemy import SQLAlchemy -from flask_cors import CORS -from flask_login import LoginManager -from flask_migrate import Migrate - -from tmdbv3api import TMDb - -DB = SQLAlchemy() -MIGRATE = Migrate() -LOGIN_MANAGER = LoginManager() -all_auth_tokens = {} - - -class ChocolateException(Exception): - """Base class for exceptions in Chocolate""" - - -class UnsupportedSystemDefaultPath(ChocolateException): - """Raised when the default path for the config file and the database file is not supported by Chocolate""" - - -parser = argparse.ArgumentParser("Chocolate") -parser.add_argument("--config", help="Path to the config file (a .ini file)") -parser.add_argument("--db", help="Path to the database file (a .db file)") -parser.add_argument("--images", help="Path to the images folder (a folder)") -parser.add_argument("--no-scans", help="Disable startup scans", action="store_true") - -ARGUMENTS = parser.parse_args() - -paths = { - "Windows": { - "config": f"{os.getenv('APPDATA')}/Chocolate/config.ini", - "db": f"{os.getenv('APPDATA')}/Chocolate/database.db", - "images": f"{os.getenv('APPDATA')}/Chocolate/images", - }, - "Linux": { - "config": "/var/chocolate/config.ini", - "db": "/var/chocolate/database.db", - "images": "/var/chocolate/images/", - }, - "Darwin": { - "config": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/config.ini", - "db": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/database.db", - "images": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/images/", - }, -} - -OPERATING_SYSTEM = platform.system() - -if OPERATING_SYSTEM not in paths: - raise UnsupportedSystemDefaultPath( - f"No known default file path for the config / database on your operating system ({OPERATING_SYSTEM}). 
Please use --config and --database path or create a pull request to add your system to the one supported by Chocolate" - ) - -CONFIG_PATH = ARGUMENTS.config or paths[OPERATING_SYSTEM]["config"] -CONFIG_PATH = CONFIG_PATH.replace("\\", "/") - -DB_PATH = ARGUMENTS.db or paths[OPERATING_SYSTEM]["db"] -DB_PATH = DB_PATH.replace("\\", "/") - -IMAGES_PATH = ARGUMENTS.images or paths[OPERATING_SYSTEM]["images"] -IMAGES_PATH = IMAGES_PATH.replace("\\", "/") -if IMAGES_PATH.endswith("/"): - IMAGES_PATH = IMAGES_PATH[:-1] - - -def create_app(): - is_in_docker = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False) - TEMPLATE_FOLDER = "" - - if is_in_docker: - dir_path = "/chocolate" - TEMPLATE_FOLDER = f"{dir_path}/templates" - else: - dir_path = pathlib.Path(__package__).parent - TEMPLATE_FOLDER = f"{dir_path}/templates" - - if not os.path.isdir(IMAGES_PATH): - os.mkdir(IMAGES_PATH) - if not os.path.isdir(f"{IMAGES_PATH}/avatars"): - os.mkdir(f"{IMAGES_PATH}/avatars") - - app = Flask( - __name__, static_folder=f"{dir_path}/static", template_folder=TEMPLATE_FOLDER - ) - - app.secret_key = "ChocolateDBPassword" - - CORS(app, supports_credentials=True, resources={r"/*": {"origins": "*"}}) - app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{DB_PATH}" - app.config["MAX_CONTENT_LENGTH"] = 4096 * 4096 - app.config["UPLOAD_FOLDER"] = f"{dir_path}/static/img/" - app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False - app.config["DIR_PATH"] = dir_path - app.config["JSON_AS_ASCII"] = False - - from .routes.users import users_bp - from .routes.settings import settings_bp - from .routes.libraries import libraries_bp - from .routes.arr import arr_bp - - app.register_blueprint(users_bp) - app.register_blueprint(settings_bp) - app.register_blueprint(libraries_bp) - app.register_blueprint(arr_bp) - - DB.init_app(app) - MIGRATE.init_app(app, DB) - LOGIN_MANAGER.init_app(app) - LOGIN_MANAGER.login_view = "login" - - return app - - -def check_dependencies(): - if not shutil.which("ffmpeg"): - logging.warning( - "ffmpeg is not installed. Chocolate will not be able to play videos." - ) - - -def get_dir_path(): - is_in_docker = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False) - - if is_in_docker: - dir_path = "/chocolate" - else: - dir_path = os.path.dirname(__file__).replace("\\", "/") - - return dir_path - - -def create_tmdb(): - tmdb = TMDb() - api_key_tmdb = config["APIKeys"]["TMDB"] - if api_key_tmdb == "Empty": - print( - "Follow this tutorial to get your TMDB API Key : https://developers.themoviedb.org/3/getting-started/introduction" - ) - tmdb.api_key = config["APIKeys"]["TMDB"] - tmdb.language = config["ChocolateSettings"]["language"] - - return tmdb - - -def get_config(): - if not os.path.exists(CONFIG_PATH): - logging.warning( - f"Config file not found at {CONFIG_PATH}. Creating a new one..." 
- ) - - if not os.path.isdir(os.path.dirname(CONFIG_PATH)): - os.mkdir(os.path.dirname(CONFIG_PATH)) - - with open(f"{get_dir_path()}/empty_config.ini", "r") as empty_config: - with open(CONFIG_PATH, "w") as config: - config.write(empty_config.read()) - - config = configparser.ConfigParser() - config.read(CONFIG_PATH) - if config["ChocolateSettings"]["language"] == "Empty": - config["ChocolateSettings"]["language"] = "EN" - return config - - -def write_config(config): - with open(CONFIG_PATH, "w") as configfile: - config.write(configfile) - - -check_dependencies() - -config = get_config() -tmdb = create_tmdb() +import os +import configparser +import platform +import argparse +import logging +import pathlib +import shutil + +from flask import Flask +from flask_sqlalchemy import SQLAlchemy +from flask_cors import CORS +from flask_login import LoginManager +from flask_migrate import Migrate + +from tmdbv3api import TMDb + +DB = SQLAlchemy() +MIGRATE = Migrate() +LOGIN_MANAGER = LoginManager() +all_auth_tokens = {} + + +class ChocolateException(Exception): + """Base class for exceptions in Chocolate""" + + +class UnsupportedSystemDefaultPath(ChocolateException): + """Raised when the default path for the config file and the database file is not supported by Chocolate""" + + +parser = argparse.ArgumentParser("Chocolate") +parser.add_argument("--config", help="Path to the config file (a .ini file)") +parser.add_argument("--db", help="Path to the database file (a .db file)") +parser.add_argument("--images", help="Path to the images folder (a folder)") +parser.add_argument("--logs", help="Path to the logs file (a .log file)") +parser.add_argument("--no-scans", help="Disable startup scans", action="store_true") + +ARGUMENTS = parser.parse_args() + +paths = { + "Windows": { + "config": f"{os.getenv('APPDATA')}/Chocolate/config.ini", + "db": f"{os.getenv('APPDATA')}/Chocolate/database.db", + "images": f"{os.getenv('APPDATA')}/Chocolate/images", + "logs": f"{os.getenv('APPDATA')}/Chocolate/server.log", + }, + "Linux": { + "config": "/var/chocolate/config.ini", + "db": "/var/chocolate/database.db", + "images": "/var/chocolate/images/", + "logs": "/var/chocolate/server.log", + }, + "Darwin": { + "config": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/config.ini", + "db": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/database.db", + "images": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/images/", + "logs": f"{os.getenv('HOME')}/Library/Application Support/Chocolate/server.log", + }, +} + +OPERATING_SYSTEM = platform.system() + +if OPERATING_SYSTEM not in paths: + raise UnsupportedSystemDefaultPath( + f"No known default file path for the config / database on your operating system ({OPERATING_SYSTEM}). 
Please use --config and --database path or create a pull request to add your system to the one supported by Chocolate" + ) + +CONFIG_PATH = ARGUMENTS.config or paths[OPERATING_SYSTEM]["config"] +CONFIG_PATH = CONFIG_PATH.replace("\\", "/") + +DB_PATH = ARGUMENTS.db or paths[OPERATING_SYSTEM]["db"] +DB_PATH = DB_PATH.replace("\\", "/") + +LOG_PATH = ARGUMENTS.logs or paths[OPERATING_SYSTEM]["logs"] +LOG_PATH = LOG_PATH.replace("\\", "/") + +IMAGES_PATH = ARGUMENTS.images or paths[OPERATING_SYSTEM]["images"] +IMAGES_PATH = IMAGES_PATH.replace("\\", "/") +if IMAGES_PATH.endswith("/"): + IMAGES_PATH = IMAGES_PATH[:-1] + + +def create_app(): + is_in_docker = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False) + TEMPLATE_FOLDER = "" + + if is_in_docker: + dir_path = "/chocolate" + TEMPLATE_FOLDER = f"{dir_path}/templates" + else: + dir_path = pathlib.Path(__package__).parent + TEMPLATE_FOLDER = f"{dir_path}/templates" + + if not os.path.isdir(IMAGES_PATH): + os.mkdir(IMAGES_PATH) + if not os.path.isdir(f"{IMAGES_PATH}/avatars"): + os.mkdir(f"{IMAGES_PATH}/avatars") + + app = Flask( + __name__, static_folder=f"{dir_path}/static", template_folder=TEMPLATE_FOLDER + ) + + app.secret_key = "ChocolateDBPassword" + + CORS(app, supports_credentials=True, resources={r"/*": {"origins": "*"}}) + app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{DB_PATH}" + app.config["MAX_CONTENT_LENGTH"] = 4096 * 4096 + app.config["UPLOAD_FOLDER"] = f"{dir_path}/static/img/" + app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False + app.config["DIR_PATH"] = dir_path + app.config["JSON_AS_ASCII"] = False + + from .routes.users import users_bp + from .routes.settings import settings_bp + from .routes.libraries import libraries_bp + from .routes.arr import arr_bp + + app.register_blueprint(users_bp) + app.register_blueprint(settings_bp) + app.register_blueprint(libraries_bp) + app.register_blueprint(arr_bp) + + DB.init_app(app) + MIGRATE.init_app(app, DB) + LOGIN_MANAGER.init_app(app) + LOGIN_MANAGER.login_view = "login" + + return app + + +def check_dependencies(): + if not shutil.which("ffmpeg"): + logging.warning( + "ffmpeg is not installed. Chocolate will not be able to play videos." + ) + + +def get_dir_path(): + is_in_docker = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False) + + if is_in_docker: + dir_path = "/chocolate" + else: + dir_path = os.path.dirname(__file__).replace("\\", "/") + + return dir_path + + +def create_tmdb(): + tmdb = TMDb() + api_key_tmdb = config["APIKeys"]["TMDB"] + if api_key_tmdb == "Empty": + print( + "Follow this tutorial to get your TMDB API Key : https://developers.themoviedb.org/3/getting-started/introduction" + ) + tmdb.api_key = config["APIKeys"]["TMDB"] + tmdb.language = config["ChocolateSettings"]["language"] + + return tmdb + + +def get_config(): + if not os.path.exists(CONFIG_PATH): + logging.warning( + f"Config file not found at {CONFIG_PATH}. Creating a new one..." 
+ ) + + if not os.path.isdir(os.path.dirname(CONFIG_PATH)): + os.mkdir(os.path.dirname(CONFIG_PATH)) + + with open(f"{get_dir_path()}/empty_config.ini", "r") as empty_config: + with open(CONFIG_PATH, "w") as config: + config.write(empty_config.read()) + + config = configparser.ConfigParser() + config.read(CONFIG_PATH) + if config["ChocolateSettings"]["language"] == "Empty": + config["ChocolateSettings"]["language"] = "EN" + return config + + +def write_config(config): + with open(CONFIG_PATH, "w") as configfile: + config.write(configfile) + + +check_dependencies() + +config = get_config() +tmdb = create_tmdb() diff --git a/src/chocolate_app/__main__.py b/src/chocolate_app/__main__.py index c0676d2..b584b5f 100644 --- a/src/chocolate_app/__main__.py +++ b/src/chocolate_app/__main__.py @@ -1,3548 +1,3596 @@ -import datetime -import io -import json -import os -import platform -import re -import subprocess -import warnings -import zipfile -import rarfile -import fitz -import logging -import git -import GPUtil -import pycountry -import requests -import sqlalchemy -import natsort - -from time import localtime, mktime, time -from uuid import uuid4 -from deep_translator import GoogleTranslator -from flask import ( - abort, - jsonify, - make_response, - request, - send_file, - render_template, -) -from guessit import guessit -from PIL import Image -from pypresence import Presence -from tmdbv3api import TV, Movie, Person, TMDb, Search -from tmdbv3api.as_obj import AsObj -from unidecode import unidecode -from videoprops import get_video_properties -from operator import itemgetter - -from . import ( - create_app, - get_dir_path, - DB, - LOGIN_MANAGER, - tmdb, - config, - all_auth_tokens, - ARGUMENTS, - IMAGES_PATH, - write_config, -) -from .tables import Language, Movies, Series, Seasons, Episodes, OthersVideos, Users, Libraries, Books, Artists, MusicLiked, MusicPlayed, Playlists, Tracks, Albums, Actors, Games, LatestEpisodeWatched, LibrariesMerge -from . 
import scans -from .utils.utils import generate_log, check_authorization, user_in_lib - -app = create_app() -dir_path = get_dir_path() - -with app.app_context(): - DB.create_all() - -log = logging.getLogger("werkzeug") -log.setLevel(logging.DEBUG) - -start_time = mktime(localtime()) - -with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=sqlalchemy.exc.SAWarning) - -langs_dict = GoogleTranslator().get_supported_languages(as_dict=True) - - -@LOGIN_MANAGER.user_loader -def load_user(id): - return Users.query.get(int(id)) - - -try: - repo = git.Repo(search_parent_directories=True) - last_commit_hash = repo.head.object.hexsha[:7] -except Exception: - last_commit_hash = "xxxxxxx" - - -def translate(string): - language = config["ChocolateSettings"]["language"] - if language == "EN": - return string - translated = GoogleTranslator(source="english", target=language.lower()).translate( - string - ) - return translated - - -tmdb.language = config["ChocolateSettings"]["language"].lower() -tmdb.debug = True - -movie = Movie() -show = TV() - -error_message = True -client_id = "771837466020937728" - -enabled_rpc = config["ChocolateSettings"]["discordrpc"] -if enabled_rpc == "true": - try: - RPC = Presence(client_id) - RPC.connect() - except Exception: - enabled_rpc = "false" - config.set("ChocolateSettings", "discordrpc", "false") - write_config(config) - -searched_films = [] -all_movies_not_sorted = [] -searched_series = [] -simple_data_series = {} - -config_language = config["ChocolateSettings"]["language"] -with app.app_context(): - language_db = DB.session.query(Language).first() - exists = DB.session.query(Language).first() is not None - if not exists: - new_language = Language(language="EN") - DB.session.add(new_language) - DB.session.commit() - language_db = DB.session.query(Language).first() - if language_db.language != config_language: - DB.session.query(Movies).delete() - DB.session.query(Series).delete() - DB.session.query(Seasons).delete() - DB.session.query(Episodes).delete() - language_db.language = config_language - DB.session.commit() - -CHUNK_LENGTH = 5 - -movies_genre = [] -movie_extension = "" -websites_trailers = { - "YouTube": "https://www.youtube.com/embed/", - "Dailymotion": "https://www.dailymotion.com/video_movie/", - "Vimeo": "https://vimeo.com/", -} - - -@app.after_request -def after_request(response): - code_to_status = { - 100: "Keep the change, ya filthy animal", - 101: "I feel the need... the need for speed.", - 102: "There's a storm coming, Mr. Wayne.", - 103: "I'll be back.", - 200: "Everything is awesome!", - 201: "It's alive! It's alive!", - 202: "Challenge accepted!", - 203: "Non-Authoritative Information", - 204: "Nothing to see here.", - 205: "I feel the power of the reset.",
- 206: "I've got a bad feeling about this... but only a part of it.", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "I'm going on an adventure!", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "(Unused)", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "You shall not pass", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Payload Too Large", - 414: "URI Too Long", - 415: "Unsupported Media Type", - 416: "Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 421: "Misdirected Request", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 425: "Too Early", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 451: "Unavailable For Legal Reasons", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 510: "Not Extended", - 511: "Network Authentication Required", - } - - if response.status_code in code_to_status: - generate_log( - request, f"{response.status_code} - {code_to_status[response.status_code]}" - ) - else: - generate_log(request, f"{response.status_code} - Unknown status code") - - return response - - -@app.route("/") -@app.route("/<path:path>") -def index(path=None): - return render_template("index.html") - - -@app.route("/check_login", methods=["POST"]) -def check_login(): - global all_auth_tokens - token = request.get_json()["token"] - if not token: - generate_log(request, "ERROR") - return jsonify({"status": "error"}) - - token = "Bearer " + token - - if token not in all_auth_tokens.keys(): - generate_log(request, "ERROR") - return jsonify({"status": "error"}) - - user = Users.query.filter_by(name=all_auth_tokens[token]["user"]).first() - return jsonify( - { - "status": "ok", - "username": all_auth_tokens[token]["user"], - "account_type": user.account_type, - "account_id": user.id, - } - ) - - -@app.route("/check_download") -def check_download(): - if config["ChocolateSettings"]["allowdownload"] == "true": - return jsonify(True) - return jsonify(False) - - -def length_video(path: str) -> float: - seconds = subprocess.run( - [ - "ffprobe", - "-v", - "error", - "-show_entries", - "format=duration", - "-of", - "default=noprint_wrappers=1:nokey=1", - path, - ], - stdout=subprocess.PIPE, - text=True, - ) - return float(seconds.stdout or 0) - - -def get_gpu_info() -> str: - if platform.system() == "Windows": - return gpuname() - elif platform.system() == "Darwin": - return subprocess.check_output( - ["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"] - ).decode("utf-8").strip() - elif platform.system() == "Linux": - return subprocess.check_output( - ["lshw", "-C", "display", "-short"] - ).decode("utf-8") - return "" - -
-def gpuname() -> str: - """Returns the model name of the first available GPU""" - try: - gpus = GPUtil.getGPUs() - except Exception: - print( - "Unable to detect GPU model." - ) - return "UNKNOWN" - if len(gpus) == 0: - raise ValueError("No GPUs detected in the system") - return gpus[0].name - -def get_gpu_brand(): - gpu = get_gpu_info().lower() - nvidia_possibilities = ["nvidia", "gtx", "rtx", "geforce"] - amd_possibilities = ["amd", "radeon", "rx", "vega"] - intel_possibilities = ["intel", "hd graphics", "iris", "uhd"] - mac_possibilities = ["apple", "mac", "m1", "m2"] - if any(x in gpu for x in nvidia_possibilities): - return "NVIDIA" - elif any(x in gpu for x in amd_possibilities): - return "AMD" - elif any(x in gpu for x in intel_possibilities): - return "Intel" - elif any(x in gpu for x in mac_possibilities): - return "Apple" - else: - return "UNKNOWN" - - -@app.route("/language_file") -def language_file(): - language = config["ChocolateSettings"]["language"] - - if ( - not os.path.isfile(f"{dir_path}/static/lang/{language.lower()}.json") - or "{}" - in open( - f"{dir_path}/static/lang/{language.lower()}.json", "r", encoding="utf-8" - ).read() - ): - language = "EN" - - with open( - f"{dir_path}/static/lang/{language.lower()}.json", "r", encoding="utf-8" - ) as f: - language = json.load(f) - - with open(f"{dir_path}/static/lang/EN.json", "r", encoding="utf-8") as f: - en = json.load(f) - - for key in en: - if key not in language: - language[key] = en[key] - - return jsonify(language) - - -@app.route("/video_movie/<movie_id>.m3u8", methods=["GET"]) -def create_m3u8(movie_id): - movie = Movies.query.filter_by(id=movie_id).first() - if not movie: - abort(404) - video_path = movie.slug - duration = length_video(video_path) - - file = f"""#EXTM3U -#EXT-X-MEDIA-SEQUENCE:0 -#EXT-X-TARGETDURATION:{CHUNK_LENGTH}\n\n""" - - for i in range(0, int(duration), CHUNK_LENGTH): - file += f"#EXTINF:{int(CHUNK_LENGTH)},\n/chunk_movie/{movie_id}-{(i // CHUNK_LENGTH) + 1}.ts\n" # noqa - - file += "#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" - ) - - return response - - -@app.route("/video_movie/<quality>/<movie_id>.m3u8", methods=["GET"]) -def create_m3u8_quality(quality, movie_id): - movie = Movies.query.filter_by(id=movie_id).first() - video_path = movie.slug - duration = length_video(video_path) - file = f"""#EXTM3U -#EXT-X-MEDIA-SEQUENCE:0 -#EXT-X-TARGETDURATION:{CHUNK_LENGTH}\n""" - - for i in range(0, int(duration), CHUNK_LENGTH): - file += f"#EXTINF:{int(CHUNK_LENGTH)},\n/chunk_movie/{quality}/{movie_id}-{(i // CHUNK_LENGTH) + 1}.ts\n" - - file += "#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" - ) - - return response - -
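The playlist builders above all lean on one piece of arithmetic: a clip of duration D seconds is exposed as roughly D / CHUNK_LENGTH segments, and segment n covers the window starting at (n - 1) * CHUNK_LENGTH. A minimal sketch of that math, outside the diff and with illustrative names only:

def build_playlist(media_id, duration, chunk_length=5):
    # Mirrors create_m3u8 above: one #EXTINF entry per chunk_length-second window.
    lines = ["#EXTM3U", "#EXT-X-MEDIA-SEQUENCE:0", f"#EXT-X-TARGETDURATION:{chunk_length}", ""]
    for i in range(0, int(duration), chunk_length):
        lines.append(f"#EXTINF:{chunk_length},")
        lines.append(f"/chunk_movie/{media_id}-{(i // chunk_length) + 1}.ts")
    lines.append("#EXT-X-ENDLIST")
    return "\n".join(lines)

build_playlist(42, 12.0) lists /chunk_movie/42-1.ts through /chunk_movie/42-3.ts; no media is read until the player actually requests a .ts URL.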
- """ - - file += "\n#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set("Content-Disposition", "attachment", filename=f"{hash}.m3u8") - - return response - - -@app.route("/video_other//", methods=["GET"]) -def create_other_m3u8_quality(quality, hash): - other = OthersVideos.query.filter_by(video_hash=hash).first() - video_path = other.slug - duration = length_video(video_path) - file = f""" -#EXTM3U - -#EXT-X-VERSION:4 -#EXT-X-TARGETDURATION:{CHUNK_LENGTH} -#EXT-X-MEDIA-SEQUENCE:1 - """ - - for i in range(0, int(duration), CHUNK_LENGTH): - file += f""" -#EXTINF:{float(CHUNK_LENGTH)}, -/chunk_other/{quality}/{hash}-{(i // CHUNK_LENGTH) + 1}.ts - """ - - file += "\n#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set("Content-Disposition", "attachment", filename=f"{hash}.m3u8") - - return response - - -@app.route("/video_serie/", methods=["GET"]) -def create_serie_m3u8(episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_path = episode.slug - duration = length_video(episode_path) - file = f""" -#EXTM3U - -#EXT-X-VERSION:4 -#EXT-X-TARGETDURATION:{CHUNK_LENGTH} -#EXT-X-MEDIA-SEQUENCE:1 - """ - - for i in range(0, int(duration), CHUNK_LENGTH): - file += f""" -#EXTINF:{float(CHUNK_LENGTH)}, -/chunk_serie/{episode_id}-{(i // CHUNK_LENGTH) + 1}.ts - """ - - file += "\n#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set("Content-Disposition", "attachment", filename=f"{episode_id}") - - return response - - -@app.route("/video_serie//", methods=["GET"]) -def create_serie_m3u8_quality(quality, episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_path = episode.slug - duration = length_video(episode_path) - file = f""" -#EXTM3U - -#EXT-X-VERSION:4 -#EXT-X-TARGETDURATION:{CHUNK_LENGTH} -#EXT-X-MEDIA-SEQUENCE:1 - """ - - for i in range(0, int(duration), CHUNK_LENGTH): - file += f""" -#EXTINF:{float(CHUNK_LENGTH)}, -/chunk_serie/{quality}/{episode_id}-{(i // CHUNK_LENGTH) + 1}.ts - """ - - file += "\n#EXT-X-ENDLIST" - - response = make_response(file) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set("Content-Disposition", "attachment", filename=f"{episode_id}") - - return response - - -@app.route("/chunk_serie/-.ts", methods=["GET"]) -def get_chunk_serie(episode_id, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_path = episode.slug - - time_start = str(datetime.timedelta(seconds=seconds)) - time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) - log_level_value = "error" - command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - log_level_value, - 
"-ss", - time_start, - "-to", - time_end, - "-i", - episode_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-c:a", - "aac", - "-b:a", - "196k", - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{episode_id}-{idx}.ts" - ) - - return response - - -@app.route("/chunk_serie//-.ts", methods=["GET"]) -def get_chunk_serie_quality(quality, episode_id, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_path = episode.slug - - time_start = str(datetime.timedelta(seconds=seconds)) - time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) - video_properties = get_video_properties(episode_path) - width = video_properties["width"] - height = video_properties["height"] - new_width = int(float(quality)) - new_height = round(float(width) / float(height) * new_width) - if (new_height % 2) != 0: - new_height += 1 - log_level_value = "error" - - bitrate = { - "1080": "192k", - "720": "192k", - "480": "128k", - "360": "128k", - "240": "96k", - "144": "64k", - } - - command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - log_level_value, - "-ss", - time_start, - "-to", - time_end, - "-i", - episode_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-vf", - f"scale={new_height}:{new_width}", - "-c:a", - "aac", - "-b:a", - bitrate[quality], - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{episode_id}-{idx}.ts" - ) - - return response - - -@app.route("/chunk_movie/-.ts", methods=["GET"]) -def chunk_movie(movie_id, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - movie = Movies.query.filter_by(id=movie_id).first() - video_path = movie.slug - - time_start = str(datetime.timedelta(seconds=seconds)) - time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) - log_level_value = "error" - - command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - log_level_value, - "-ss", - time_start, - "-to", - time_end, - "-i", - video_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-c:a", - "aac", - "-b:a", - "196k", - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}-{idx}.ts" - ) - - return response - - -@app.route("/chunk_movie//-.ts", methods=["GET"]) -def get_chunk_quality(quality, movie_id, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - - movie = Movies.query.filter_by(id=movie_id).first() - video_path = 
-@app.route("/chunk_movie/<quality>/<movie_id>-<int:idx>.ts", methods=["GET"]) -def get_chunk_quality(quality, movie_id, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - - movie = Movies.query.filter_by(id=movie_id).first() - video_path = movie.slug - - time_start = str(datetime.timedelta(seconds=seconds)) - time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) - video_properties = get_video_properties(video_path) - width = video_properties["width"] - height = video_properties["height"] - new_width = int(float(quality)) - new_height = round(float(width) / float(height) * new_width) - while (new_height % 8) != 0: - new_height += 1 - - while (new_width % 8) != 0: - new_width += 1 - - a_bitrate = ((int(quality) - 144) / (1080 - 144)) * (192 - 64) + 64 - - v_bitrate = ((int(quality) - 144) / (1080 - 144)) * (5000 - 1500) + 1500 - - if v_bitrate < 1500: - v_bitrate = 1500 - - log_level_value = "error" - command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - log_level_value, - "-ss", - time_start, - "-to", - time_end, - "-hwaccel", - "auto", - "-i", - video_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-vf", - f"scale={new_height}:{new_width}", - "-c:a", - "aac", - "-b:a", - f"{int(a_bitrate)}k", - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}-{idx}.ts" - ) - - return response - - -@app.route("/chunk_other/<hash>-<int:idx>.ts", methods=["GET"]) -def get_chunk_other(hash, idx=0): - seconds = (idx - 1) * CHUNK_LENGTH - movie = OthersVideos.query.filter_by(video_hash=hash).first() - video_path = movie.slug - - time_start = str(datetime.timedelta(seconds=seconds)) - time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) - log_level_value = "error" - - command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - log_level_value, - "-ss", - time_start, - "-to", - time_end, - "-i", - video_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-c:a", - "aac", - "-b:a", - "196k", - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{hash}-{idx}.ts" - ) - - return response - -
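Unlike the other quality routes, which use a fixed quality-to-bitrate lookup table, get_chunk_quality above interpolates its audio bitrate linearly across the 144p-1080p range (the video figure is computed but unused in its ffmpeg command). The same formula as a standalone sketch with illustrative names:

def interpolated_bitrates(quality):
    # Map quality in [144, 1080] onto 64k-192k audio and 1500k-5000k video.
    t = (int(quality) - 144) / (1080 - 144)
    audio = t * (192 - 64) + 64
    video = max(t * (5000 - 1500) + 1500, 1500)
    return audio, video

interpolated_bitrates(720) gives roughly (142.8, 3653.8); interpolated_bitrates(144) gives exactly (64.0, 1500.0).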
"-loglevel", - log_level_value, - "-ss", - time_start, - "-to", - time_end, - "-i", - video_path, - "-output_ts_offset", - time_start, - "-c:v", - "libx264", - "-vf", - f"scale={new_height}:{new_width}", - "-c:a", - "aac", - "-b:a", - bitrate[quality], - "-ac", - "2", - "-f", - "mpegts", - "pipe:1", - ] - - pipe = subprocess.Popen(command, stdout=subprocess.PIPE) - - response = make_response(pipe.stdout.read()) - response.headers.set("Content-Type", "video/MP2T") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{hash}-{idx}.ts" - ) - - return response - - -@app.route("/chunk_caption//.vtt", methods=["GET"]) -def chunk_caption(movie_id, index): - movie = Movies.query.filter_by(id=movie_id).first() - video_path = movie.slug - extract_captions_command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - "error", - "-i", - video_path, - "-map", - f"0:{index}", - "-f", - "webvtt", - "pipe:1", - ] - extract_captions = subprocess.run(extract_captions_command, stdout=subprocess.PIPE) - - extract_captions_response = make_response(extract_captions.stdout) - extract_captions_response.headers.set("Content-Type", "text/VTT") - extract_captions_response.headers.set( - "Content-Disposition", "attachment", filename=f"{index}/{movie_id}.vtt" - ) - - return extract_captions_response - - -@app.route("/captionMovie/_.m3u8", methods=["GET"]) -def caption_movie_by_id_to_m3_u8(movie_id, id): - movie = Movies.query.filter_by(id=movie_id).first() - duration = movie.duration - duration = sum(x * int(t) for x, t in zip([3600, 60, 1], duration.split(":"))) - text = f""" -#EXTM3U -#EXT-X-TARGETDURATION:887 -#EXT-X-VERSION:3 -#EXT-X-MEDIA-SEQUENCE:1 -#EXT-X-PLAYLIST-TYPE:VOD -#EXTINF:{float(duration)+1}, -/chunk_caption/{id}/{movie_id}.vtt -#EXT-X-ENDLIST - """ - response = make_response(text) - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set("Accept-Encoding", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}_{id}.m3u8" - ) - - return response - - -@app.route("/chunk_caption_serie///.vtt", methods=["GET"]) -def chunk_caption_serie(language, index, episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - video_path = episode.slug - - extract_captions_command = [ - "ffmpeg", - "-hide_banner", - "-loglevel", - "error", - "-i", - video_path, - "-map", - f"0:{index}", - "-f", - "webvtt", - "pipe:1", - ] - - extract_captions = subprocess.run(extract_captions_command, stdout=subprocess.PIPE) - - extract_captions_response = make_response(extract_captions.stdout) - extract_captions_response.headers.set("Content-Type", "text/VTT") - extract_captions_response.headers.set( - "Content-Disposition", - "attachment", - filename=f"{language}/{index}/{episode_id}.vtt", - ) - - return extract_captions_response - - -@app.route("/get_language", methods=["GET"]) -def get_language(): - language = config["ChocolateSettings"]["language"] - return jsonify({"language": language}) - - -@app.route("/get_all_movies/", methods=["GET"]) -def get_all_movies(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SERVER") - username = all_auth_tokens[token]["user"] - - movies = Movies.query.filter_by(library_name=library).all() - user = 
-@app.route("/get_all_movies/<library>", methods=["GET"]) -def get_all_movies(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SERVER") - username = all_auth_tokens[token]["user"] - - movies = Movies.query.filter_by(library_name=library).all() - user = Users.query.filter_by(name=username).first() - - movies_list = [movie.__dict__ for movie in movies] - - user_type = user.account_type - for movie in movies_list: - del movie["_sa_instance_state"] - - if user_type in ["Kid", "Teen"]: - movies_list = [movie for movie in movies_list if movie["adult"] != "True"] - - used_keys = [ - "real_title", - "banner", - "cover", - "description", - "id", - "note", - "duration", - ] - - for movie in movies_list: - for key in list(movie.keys()): - if key not in used_keys: - del movie[key] - - movies_list = natsort.natsorted(movies_list, key=itemgetter("real_title")) - - return jsonify(movies_list) - - -@app.route("/get_all_books/<library>", methods=["GET"]) -def get_all_books(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - books = Books.query.filter_by(library_name=library).all() - books_list = [book.__dict__ for book in books] - - for book in books_list: - del book["_sa_instance_state"] - del book["slug"] - del book["book_type"] - del book["cover"] - del book["library_name"] - - books_list = natsort.natsorted(books_list, key=itemgetter("title")) - - return jsonify(books_list) - - -@app.route("/get_all_playlists/<library>", methods=["GET"]) -def get_all_playlists(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - username = all_auth_tokens[token]["user"] - user = Users.query.filter_by(name=username).first() - user_id = user.id - - playlists = Playlists.query.filter( - Playlists.user_id.like(f"%{user_id}%"), Playlists.library_name == library - ).all() - playlists_list = [playlist.__dict__ for playlist in playlists] - - for playlist in playlists_list: - del playlist["_sa_instance_state"] - - playlists_list = natsort.natsorted(playlists_list, key=itemgetter("name")) - - liked_music = MusicLiked.query.filter_by(user_id=user_id, liked="true").all() - musics = [] - for music in liked_music: - music_id = music.music_id - musics.append(music_id) - musics = ",".join(musics) - - if len(musics) > 0: - playlists_list.insert( - 0, - { - "id": 0, - "name": "Likes", - "tracks": musics, - "cover": "/static/img/likes.webp", - }, - ) - - return jsonify(playlists_list) - - -@app.route("/get_all_albums/<library>", methods=["GET"]) -def get_all_albums(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - albums = Albums.query.filter_by(library_name=library).all() - albums_list = [album.__dict__ for album in albums] - - for album in albums_list: - del album["_sa_instance_state"] - - albums_list = natsort.natsorted(albums_list, key=itemgetter("name")) - - return jsonify(albums_list) - - -@app.route("/get_all_artists/<library>", methods=["GET"]) -def get_all_artists(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - artists = Artists.query.filter_by(library_name=library).all() - artists_list = [artist.__dict__ for artist in artists] - - for artist in artists_list: - del artist["_sa_instance_state"] - - artists_list = natsort.natsorted(artists_list, key=itemgetter("name")) - - return jsonify(artists_list) - -
-@app.route("/get_all_tracks/<library>", methods=["GET"]) -def get_all_tracks(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - tracks = Tracks.query.filter_by(library_name=library).all() - tracks_list = [track.__dict__ for track in tracks] - - for track in tracks_list: - del track["_sa_instance_state"] - try: - album_name = Albums.query.filter_by(id=track["album_id"]).first().name - track["album_name"] = album_name - except Exception: - track["album_name"] = None - - try: - artist_name = Artists.query.filter_by(id=track["artist_id"]).first().name - track["artist_name"] = artist_name - except Exception: - track["artist_name"] = None - - tracks_list = natsort.natsorted(tracks_list, key=itemgetter("name")) - - return jsonify(tracks_list) - - -@app.route("/get_album_tracks/<album_id>") -def get_album_tracks(album_id): - token = request.headers.get("Authorization") - - try: - user = all_auth_tokens[token]["user"] - generate_log(request, "SUCCESS") - except Exception: - generate_log(request, "ERROR") - return jsonify({"error": "Invalid token"}) - - user = Users.query.filter_by(name=user).first() - user_id = user.id - - tracks = Tracks.query.filter_by(album_id=album_id).all() - tracks_list = [track.__dict__ for track in tracks] - - artist = Artists.query.filter_by(id=tracks_list[0]["artist_id"]).first().name - album = Albums.query.filter_by(id=tracks_list[0]["album_id"]).first().name - - for track in tracks_list: - del track["_sa_instance_state"] - - track["artist_name"] = artist - track["album_name"] = album - - music_like = MusicLiked.query.filter_by( - music_id=track["id"], user_id=user_id - ).first() - if music_like: - track["liked"] = music_like.liked - else: - track["liked"] = False - - return jsonify(tracks_list) - - -@app.route("/get_playlist_tracks/<playlist_id>") -def get_playlist_tracks(playlist_id): - token = request.headers.get("Authorization") - - try: - user = all_auth_tokens[token]["user"] - generate_log(request, "SUCCESS") - except Exception: - generate_log(request, "ERROR") - return jsonify({"error": "Invalid token"}) - - user = Users.query.filter_by(name=user).first() - user_id = user.id - tracks_list = [] - if playlist_id != "0": - tracks = Playlists.query.filter( - Playlists.user_id.like(f"%{user_id}%"), Playlists.id == playlist_id - ).first() - tracks = tracks.tracks.split(",") - for track in tracks: - track = Tracks.query.filter_by(id=track).first().__dict__ - - del track["_sa_instance_state"] - - music_like = MusicLiked.query.filter_by( - music_id=track["id"], user_id=user_id - ).first() - if music_like: - track["liked"] = music_like.liked - else: - track["liked"] = False - - if "album_id" in track: - album = Albums.query.filter_by(id=track["album_id"]).first() - if album: - track["album_name"] = album.name - - if "artist_id" in track: - artist = Artists.query.filter_by(id=track["artist_id"]).first() - if artist: - track["artist_name"] = artist.name - - tracks_list.append(track) - else: - likes = MusicLiked.query.filter_by(user_id=user_id, liked="true").all() - for like in likes: - track = Tracks.query.filter_by(id=like.music_id).first().__dict__ - - del track["_sa_instance_state"] - - music_like = MusicLiked.query.filter_by( - music_id=track["id"], user_id=user_id - ).first() - track["liked"] = music_like.liked - track["liked_at"] = music_like.liked_at - - if "album_id" in track: - album = Albums.query.filter_by(id=track["album_id"]).first() - track["album_name"] = album.name - - if "artist_id" in track: - artist = Artists.query.filter_by(id=track["artist_id"]).first() - track["artist_name"] = artist.name - - tracks_list.append(track) - - tracks_list = sorted(tracks_list, key=lambda k: k.get("liked_at", 0)) - - return jsonify(tracks_list) - -
jsonify(tracks_list) - - -@app.route("/play_track//", methods=["POST"]) -def play_track(id, user_id): - exists_in_music_played = MusicPlayed.query.filter_by( - music_id=id, user_id=user_id - ).first() - play_count = 0 - if exists_in_music_played: - exists_in_music_played.play_count = int(exists_in_music_played.play_count) + 1 - DB.session.commit() - play_count = exists_in_music_played.play_count - else: - music_played = MusicPlayed(music_id=id, user_id=user_id, play_count=1) - DB.session.add(music_played) - DB.session.commit() - play_count = music_played.play_count - - return jsonify( - { - "status": "success", - "music_id": id, - "user_id": user_id, - "play_count": play_count, - } - ) - - -@app.route("/like_track//", methods=["POST"]) -def like_track(id, user_id): - exist_in_mucis_liked = MusicLiked.query.filter_by( - music_id=id, user_id=user_id - ).first() - liked = False - like_dict = {"true": "false", "false": "true"} - if exist_in_mucis_liked: - exist_in_mucis_liked.liked = like_dict[exist_in_mucis_liked.liked] - liked = like_dict[exist_in_mucis_liked.liked] - exist_in_mucis_liked.liked_at = time() - DB.session.commit() - else: - music_liked = MusicLiked( - music_id=id, user_id=user_id, liked="true", liked_at=time() - ) - DB.session.add(music_liked) - DB.session.commit() - liked = music_liked.liked - - return jsonify( - {"status": "success", "music_id": id, "user_id": user_id, "liked": liked} - ) - - -@app.route("/create_playlist", methods=["POST"]) -def create_playlist(): - body = request.get_json() - - name = body["name"] - user_id = body["user_id"] - track_id = body["track_id"] - library = body["library"] - - exists = Playlists.query.filter_by( - name=name, user_id=user_id, library_name=library - ).first() - if exists: - return jsonify({"status": "error", "error": "Playlist already exists"}) - track = Tracks.query.filter_by(id=track_id).first() - duration = 0 - cover = track.cover - cover = generate_playlist_cover(track_id) - if not cover: - cover = "ahaha" - playlist = Playlists( - name=name, - user_id=user_id, - tracks=f"{track_id}", - library_name=library, - duration=duration, - cover=cover, - ) - DB.session.add(playlist) - DB.session.commit() - - return jsonify({"status": "success", "playlist_id": playlist.id}) - - -def generate_playlist_cover(id): - if isinstance(id, str) or isinstance(id, int): - id = int(id) - track = Tracks.query.filter_by(id=id).first() - cover = track.cover - return cover - elif isinstance(id, list): - tracks = [] - id_to_append = 0 - for i in range(4): - try: - tracks.append(id[i]) - except Exception: - tracks.append(id[id_to_append]) - id_to_append += 1 - - covers = [] - for track in tracks: - track = Tracks.query.filter_by(id=track).first() - - covers.append(track.cover) - - im1 = Image.open(covers[0]) - im2 = Image.open(covers[1]) - im3 = Image.open(covers[2]) - im4 = Image.open(covers[3]) - - im1 = im1.resize((200, 200)) - im2 = im2.resize((200, 200)) - im3 = im3.resize((200, 200)) - im4 = im4.resize((200, 200)) - - im1 = im1.crop((0, 0, 100, 100)) - im2 = im2.crop((100, 0, 200, 100)) - im3 = im3.crop((0, 100, 100, 200)) - im4 = im4.crop((100, 100, 200, 200)) - - im = Image.new("RGB", (200, 200)) - im.paste(im1, (0, 0)) - im.paste(im2, (100, 0)) - im.paste(im3, (0, 100)) - im.paste(im4, (100, 100)) - - cover = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp" - exist = os.path.exists(cover) - while exist: - cover = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp" - exist = os.path.exists(cover) - im.save(cover, "WEBP") - - return cover - - 
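# A condensed sketch of the quadrant collage above, assuming the app's own
# Tracks model and IMAGES_PATH; build_playlist_collage is an invented name,
# and pasting whole 100x100 thumbnails (rather than one quadrant of each
# cover) is a simplifying assumption.
import os
from uuid import uuid4
from PIL import Image

def build_playlist_collage(track_ids):
    covers = [Tracks.query.filter_by(id=tid).first().cover for tid in track_ids]
    covers = (covers * 4)[:4]  # repeat covers so there are always four tiles
    collage = Image.new("RGB", (200, 200))
    for cover, pos in zip(covers, [(0, 0), (100, 0), (0, 100), (100, 100)]):
        collage.paste(Image.open(cover).resize((100, 100)), pos)
    path = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp"
    while os.path.exists(path):  # uuid4 collisions are practically impossible
        path = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp"
    collage.save(path, "WEBP")
    return path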
-@app.route("/add_track_to_playlist", methods=["POST"]) -def add_track_to_playlist(): - body = request.get_json() - - playlist_id = body["playlist_id"] - track_id = body["track_id"] - - playlist = Playlists.query.filter_by(id=playlist_id).first() - if playlist.tracks == "": - playlist.tracks = track_id - else: - playlist.tracks += f",{track_id}" - cover = generate_playlist_cover(playlist.tracks.split(",")) - playlist.cover = cover - DB.session.commit() - - return jsonify( - {"status": "success", "playlist_id": playlist_id, "track_id": track_id} - ) - - -@app.route("/get_track/") -def get_track(id): - track = Tracks.query.filter_by(id=id).first().slug - - return send_file(track) - - -@app.route("/get_album/") -def get_album(album_id): - generate_log(request, "SUCCESS") - - album = Albums.query.filter_by(id=album_id).first() - album_dict = album.__dict__ - del album_dict["_sa_instance_state"] - - artist = Artists.query.filter_by(id=album_dict["artist_id"]).first().name - album_dict["artist_name"] = artist - - return jsonify(album_dict) - - -@app.route("/get_playlist/") -def get_playlist(playlist_id): - generate_log(request, "SUCCESS") - token = request.headers.get("Authorization") - user = all_auth_tokens[token]["user"] - user = Users.query.filter_by(name=user).first() - user_id = user.id - - if playlist_id != "0": - playlist = Playlists.query.filter_by(id=playlist_id).first() - playlist_dict = playlist.__dict__ - del playlist_dict["_sa_instance_state"] - else: - liked_music = MusicLiked.query.filter_by(user_id=user_id, liked="true").all() - musics = [] - for music in liked_music: - music_id = music.music_id - musics.append(music_id) - musics = ",".join(musics) - - playlist_dict = { - "id": 0, - "name": "Likes", - "tracks": musics, - "cover": "/static/img/likes.webp", - } - - return jsonify(playlist_dict) - - -@app.route("/get_artist/") -def get_artist(artist_id): - generate_log(request, "SUCCESS") - - artist = Artists.query.filter_by(id=artist_id).first() - artist_dict = artist.__dict__ - del artist_dict["_sa_instance_state"] - - return jsonify(artist_dict) - - -@app.route("/get_artist_albums/") -def get_artist_albums(artist_id): - albums = Albums.query.filter_by(artist_id=artist_id).all() - artist = Artists.query.filter_by(id=artist_id).first() - library = artist.library_name - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - - albums_list = [album.__dict__ for album in albums] - - for album in albums_list: - del album["_sa_instance_state"] - - return jsonify(albums_list) - - -@app.route("/get_artist_tracks/") -def get_artist_tracks(artist_id): - generate_log(request, "SUCCESS") - - tracks = Tracks.query.filter_by(artist_id=artist_id).all() - tracks_list = [track.__dict__ for track in tracks] - - for track in tracks_list: - del track["_sa_instance_state"] - try: - album_name = Albums.query.filter_by(id=track["album_id"]).first().name - track["album_name"] = album_name - except Exception: - pass - - try: - artist_name = Artists.query.filter_by(id=track["artist_id"]).first().name - track["artist_name"] = artist_name - except Exception: - pass - - return jsonify(tracks_list) - - -@app.route("/get_all_series/", methods=["GET"]) -def get_all_series(library): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - generate_log(request, "SUCCESS") - - username = all_auth_tokens[token]["user"] - - series = Series.query.filter_by(library_name=library).all() - the_lib = 
Libraries.query.filter_by(lib_name=library).first() - user = Users.query.filter_by(name=username).first() - user_id = user.id - user_in_the_lib = user_in_lib(user_id, the_lib) - - if not user_in_the_lib: - abort(401) - - if series is None or user is None: - abort(404) - - series_list = [serie.__dict__ for serie in series] - - user_type = user.account_type - - if user_type in ["Kid", "Teen"]: - for serie in series_list: - if serie["adult"] == "True": - series_list.remove(serie) - - fusionned_lib = LibrariesMerge.query.filter_by(parent_lib=library).all() - fusionned_lib = [child.child_lib for child in fusionned_lib] - - for lib in fusionned_lib: - series = Series.query.filter_by(library_name=lib).all() - series_list += [serie.__dict__ for serie in series] - - for serie in series_list: - del serie["_sa_instance_state"] - - for serie in series_list: - serie["seasons"] = get_seasons(serie["id"]) - - series_list = natsort.natsorted(series_list, key=itemgetter(*["original_name"])) - - return jsonify(series_list) - - -def get_seasons(id): - seasons = Seasons.query.filter_by(serie=id).all() - seasons_list = [season.__dict__ for season in seasons] - for season in seasons_list: - del season["_sa_instance_state"] - - return seasons_list - - -def get_similar_movies(movie_id): - global searched_films - similar_movies_possessed = [] - movie = Movie() - similar_movies = movie.recommendations(movie_id) - for movie_info in similar_movies: - movie_name = movie_info.title - for movie in searched_films: - if movie_name == movie: - similar_movies_possessed.append(movie) - break - return similar_movies_possessed - - -@app.route("/get_movie_data/", methods=["GET"]) -def get_movie_data(movie_id): - exists = Movies.query.filter_by(id=movie_id).first() is not None - if exists: - movie = Movies.query.filter_by(id=movie_id).first().__dict__ - del movie["_sa_instance_state"] - movie["similarMovies"] = get_similar_movies(movie_id) - return jsonify(movie) - else: - abort(404) - - -@app.route("/get_other_data/", methods=["GET"]) -def get_other_data(video_hash): - exists = OthersVideos.query.filter_by(video_hash=video_hash).first() is not None - if exists: - other = OthersVideos.query.filter_by(video_hash=video_hash).first().__dict__ - del other["_sa_instance_state"] - return jsonify(other) - else: - abort(404) - - -@app.route("/get_serie_data/", methods=["GET"]) -def get_series_data(serie_id): - exists = Series.query.filter_by(id=serie_id).first() is not None - if exists: - serie = Series.query.filter_by(id=serie_id).first().__dict__ - serie["seasons"] = get_serie_seasons(serie["id"]) - - latest_episode_watched_db = LatestEpisodeWatched.query.filter_by( - serie_id=serie_id - ).first() - if latest_episode_watched_db is not None: - serie["latest_id"] = latest_episode_watched_db.episode_id - else: - serie["latest_id"] = None - - del serie["_sa_instance_state"] - return jsonify(serie) - else: - abort(404) - - -def get_serie_seasons(id): - seasons = Seasons.query.filter_by(serie=id).all() - seasons_dict = {} - for season in seasons: - seasons_dict[season.season_number] = dict(season.__dict__) - del seasons_dict[season.season_number]["_sa_instance_state"] - return seasons_dict - - -def transform(obj): - if isinstance(obj, AsObj): - return str(obj) - return obj.replace('"', '\\"') - - -@app.route("/edit_movie//", methods=["GET", "POST"]) -def edit_movie(id, library): - if request.method == "GET": - the_movie = Movies.query.filter_by(id=id, library_name=library).first() - the_movie = the_movie.__dict__ - del 
the_movie["_sa_instance_state"] - movie_name = guessit(the_movie["title"])["title"] - file_title = the_movie["slug"] - tmdb = TMDb() - tmdb.language = config["ChocolateSettings"]["language"].lower() - movie = Movie() - movie_info = Search().movies(movie_name) - movie_info = sorted(movie_info, key=lambda k: k["popularity"], reverse=True) - - real_movies = [] - for the_movie in movie_info: - accepted_types = [str, int, list, dict, float, bool] - the_movie = the_movie.__dict__ - for key in the_movie: - if type(the_movie[key]) not in accepted_types: - the_movie[key] = str(the_movie[key]) - real_movies.append(the_movie) - - movies = {"movies": real_movies, "file_title": file_title} - - return jsonify(movies) - - new_movie_id = request.get_json()["new_id"] - - if str(new_movie_id) == str(id): - return jsonify( - {"status": "error", "error": "The new id is the same as the old one"} - ) - the_movie = Movies.query.filter_by(id=id, library_name=library).first() - - movie = Movie() - movie_info = movie.details(new_movie_id) - the_movie.id = new_movie_id - the_movie.real_title = movie_info.title - the_movie.description = movie_info.overview - the_movie.note = movie_info.vote_average - date = movie_info.release_date - - try: - date = datetime.datetime.strptime(date, "%Y-%m-%d").strftime("%d/%m/%Y") - except ValueError: - date = "Unknown" - except UnboundLocalError: - date = "Unknown" - - the_movie.date = date - - bande_annonce = movie_info.videos.results - - bande_annonce_url = "" - if len(bande_annonce) > 0: - for video in bande_annonce: - bande_annonce_type = video.type - bande_annonce_host = video.site - bande_annonce_key = video.key - if bande_annonce_type == "Trailer": - try: - bande_annonce_url = ( - websites_trailers[bande_annonce_host] + bande_annonce_key - ) - break - except KeyError as e: - bande_annonce_url = "Unknown" - print(e) - - the_movie.bande_annonce_url = bande_annonce_url - the_movie.adult = str(movie_info.adult) - - alternatives_names = [] - actual_title = movie_info.title - characters = [" ", "-", "_", ":", ".", ",", "!", "'", "`", '"'] - empty = "" - for character in characters: - for character2 in characters: - if character != character2: - string_test = actual_title.replace(character, character2) - alternatives_names.append(string_test) - string_test = actual_title.replace(character2, character) - alternatives_names.append(string_test) - string_test = actual_title.replace(character, empty) - alternatives_names.append(string_test) - string_test = actual_title.replace(character2, empty) - alternatives_names.append(string_test) - - official_alternative_names = movie.alternative_titles(movie_id=the_movie.id).titles - if official_alternative_names is not None: - for official_alternative_name in official_alternative_names: - alternatives_names.append(official_alternative_name.title) - - alternatives_names = list(dict.fromkeys(alternatives_names)) - - alternatives_names = ",".join(alternatives_names) - - the_movie.alternatives_names = alternatives_names - - movie_genre = [] - genre = movie_info.genres - for genre_info in genre: - movie_genre.append(genre_info.name) - movie_genre = ",".join(movie_genre) - - the_movie.genre = movie_genre - casts = movie_info.casts.__dict__["cast"] - - the_cast = [] - for cast in casts: - actor_id = cast.id - actor_image = ( - f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{cast.profile_path}" - ) - if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - 
f.write(requests.get(actor_image).content) - try: - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - except Exception: - os.rename( - f"{IMAGES_PATH}/Actor_{actor_id}.png", - f"{IMAGES_PATH}/Actor_{actor_id}.webp", - ) - - actor_image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" - if actor_id not in the_cast: - the_cast.append(actor_id) - else: - break - person = Person() - p = person.details(actor_id) - exists = Actors.query.filter_by(actor_id=actor_id).first() is not None - if not exists: - actor = Actors( - name=cast.name, - actor_image=actor_image, - actor_description=p.biography, - actor_birth_date=p.birthday, - actor_birth_place=p.place_of_birth, - actor_programs=f"{the_movie.id}", - actor_id=actor_id, - ) - DB.session.add(actor) - DB.session.commit() - elif exists and str(the_movie.id) not in str( - Actors.query.filter_by(actor_id=cast.id).first().actor_programs - ).split(" "): - actor = Actors.query.filter_by(actor_id=cast.id).first() - actor.actor_programs = f"{actor.actor_programs} {the_movie.id}" - DB.session.commit() - - the_cast = the_cast[:5] - the_movie.cast = ",".join([str(x) for x in the_cast]) - - movie_cover_path = f"https://image.tmdb.org/t/p/original{movie_info.poster_path}" - banner = f"https://image.tmdb.org/t/p/original{movie_info.backdrop_path}" - - try: - os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.webp") - except FileNotFoundError: - pass - try: - os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") - except FileNotFoundError: - pass - with open(f"{IMAGES_PATH}/{new_movie_id}_Cover.png", "wb") as f: - f.write(requests.get(movie_cover_path).content) - try: - img = Image.open(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") - img.save(f"{IMAGES_PATH}/{new_movie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") - movie_cover_path = f"{IMAGES_PATH}/{new_movie_id}_Cover.webp" - except Exception: - os.rename( - f"{IMAGES_PATH}/{new_movie_id}_Cover.png", - f"{IMAGES_PATH}/{new_movie_id}_Cover.webp", - ) - movie_cover_path = "/static/img/broken.webp" - try: - os.remove(f"{IMAGES_PATH}/{new_movie_id}_Banner.webp") - except FileNotFoundError: - pass - with open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - if not movie_info.backdrop_path: - banner = f"https://image.tmdb.org/t/p/original{movie_info.backdrop_path}" - if banner != "https://image.tmdb.org/t/p/originalNone": - with open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - else: - banner = "/static/img/broken.webp" - try: - img = Image.open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png") - img.save(f"{IMAGES_PATH}/{new_movie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{new_movie_id}_Banner.png") - banner = f"{IMAGES_PATH}/{new_movie_id}_Banner.webp" - except Exception: - os.rename( - f"{IMAGES_PATH}/{new_movie_id}_Banner.png", - f"{IMAGES_PATH}/{new_movie_id}_Banner.webp", - ) - banner = "/static/img/brokenBanner.webp" - - if str(id) in movie_cover_path: - movie_cover_path = movie_cover_path.replace(str(id), str(new_movie_id)) - if str(id) in banner: - banner = banner.replace(str(id), str(new_movie_id)) - - the_movie.cover = movie_cover_path - the_movie.banner = banner - DB.session.commit() - - return jsonify({"status": "success"}) - - -@app.route("/edit_serie//", methods=["GET", "POST"]) -def edit_serie(id, library): - if request.method == "GET": - serie = 
Series.query.filter_by(id=id, library_name=library).first().__dict__ - - del serie["_sa_instance_state"] - serie_name = serie["original_name"] - tmdb = TMDb() - tmdb.language = config["ChocolateSettings"]["language"].lower() - serie_info = Search().tv_shows(serie_name) - if serie_info.results == {}: - data = { - "series": [], - "folder_title": serie["original_name"], - } - return jsonify(data, default=transform, indent=4) - - serie_info = sorted(serie_info, key=lambda k: k["popularity"], reverse=True) - - real_series = [] - for the_serie in serie_info: - accepted_types = [str, int, list, dict, float, bool] - the_serie = the_serie.__dict__ - for key in the_serie: - if type(the_serie[key]) not in accepted_types: - the_serie[key] = str(the_serie[key]) - real_series.append(the_serie) - - data = { - "series": real_series, - "folder_title": serie["original_name"], - } - - return jsonify(data, default=transform, indent=4) - - elif request.method == "POST": - serie_id = request.get_json()["new_id"] - the_serie = Series.query.filter_by(id=id, library_name=library).first() - - if the_serie.id == serie_id: - return jsonify({"status": "success"}) - - all_seasons = Seasons.query.filter_by(serie=serie_id).all() - for season in all_seasons: - cover = f"{dir_path}{season.season_cover_path}" - try: - os.remove(cover) - except FileNotFoundError: - pass - episodes = Episodes.query.filter_by(season_id=season.season_number).all() - for episode in episodes: - cover = f"{dir_path}{episode.episode_cover_path}" - os.remove(cover) - DB.session.delete(episode) - DB.session.delete(season) - DB.session.commit() - - tmdb = TMDb() - tmdb.language = config["ChocolateSettings"]["language"].lower() - show = TV() - details = show.details(serie_id) - res = details - - name = details.name - cover = f"https://image.tmdb.org/t/p/original{res.poster_path}" - banner = f"https://image.tmdb.org/t/p/original{res.backdrop_path}" - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.webp"): - with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: - f.write(requests.get(cover).content) - - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") - else: - os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.webp") - with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: - f.write(requests.get(cover).content) - - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") - - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.webp"): - with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") - else: - os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.webp") - with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") - - banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" - cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" - description = res["overview"] - note = res.vote_average - date = res.first_air_date - cast = details.credits.cast - run_time = details.episode_run_time - 
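# The cover, banner, and actor images in this branch all repeat the same
# download-PNG / convert-to-WebP / delete-PNG pattern; a sketch of one shared
# helper under the same assumptions (requests and PIL only);
# fetch_image_as_webp is an invented name.
import os
import requests
from PIL import Image

def fetch_image_as_webp(url, dest_webp):
    # Download to a temporary PNG, convert it to WebP, then drop the PNG.
    tmp_png = dest_webp.rsplit(".", 1)[0] + ".png"
    with open(tmp_png, "wb") as f:
        f.write(requests.get(url).content)
    Image.open(tmp_png).save(dest_webp, "webp")
    os.remove(tmp_png)
    return dest_webp

# e.g. fetch_image_as_webp(cover, f"{IMAGES_PATH}/{serie_id}_Cover.webp")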
duration = "" - for i in range(len(run_time)): - if i != len(run_time) - 1: - duration += f"{str(run_time[i])}:" - else: - duration += f"{str(run_time[i])}" - serie_genre = details.genres - bande_annonce = details.videos.results - bande_annonce_url = "" - if len(bande_annonce) > 0: - for video in bande_annonce: - bande_annonce_type = video.type - bande_annonce_host = video.site - bande_annonce_key = video.key - if bande_annonce_type == "Trailer" or len(bande_annonce) == 1: - try: - bande_annonce_url = ( - websites_trailers[bande_annonce_host] + bande_annonce_key - ) - break - except KeyError as e: - bande_annonce_url = "Unknown" - print(e) - genre_list = [] - for genre in serie_genre: - genre_list.append(str(genre.name)) - new_cast = [] - cast = list(cast)[:5] - for actor in cast: - actor_name = actor.name.replace("/", "") - actor_id = actor.id - actor_image = f"https://image.tmdb.org/t/p/original{actor.profile_path}" - if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - f.write(requests.get(actor_image).content) - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - else: - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.webp") - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - f.write(requests.get(actor_image).content) - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - - actor_image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" - actor_character = actor.character - actor.profile_path = str(actor_image) - this_actor = [ - str(actor_name), - str(actor_character), - str(actor_image), - str(actor.id), - ] - new_cast.append(this_actor) - - person = Person() - p = person.details(actor.id) - exists = Actors.query.filter_by(actor_id=actor.id).first() is not None - if not exists: - actor = Actors( - name=actor.name, - actor_id=actor.id, - actor_image=actor_image, - actor_description=p.biography, - actor_birth_date=p.birthday, - actor_birth_place=p.place_of_birth, - actor_programs=f"{serie_id}", - ) - DB.session.add(actor) - DB.session.commit() - else: - actor = Actors.query.filter_by(actor_id=actor.id).first() - actor.actor_programs = f"{actor.actor_programs} {serie_id}" - DB.session.commit() - all_series_path = Libraries.query.filter_by(lib_name=library).first().lib_folder - serie_modified_time = os.path.getmtime( - f"{all_series_path}/{the_serie.original_name}" - ) - - new_cast = jsonify(new_cast[:5]) - genre_list = jsonify(genre_list) - is_adult = str(details["adult"]) - the_serie.id = serie_id - the_serie.name = name - the_serie.genre = genre_list - the_serie.duration = duration - the_serie.description = description - the_serie.cast = new_cast - the_serie.bande_annonce_url = bande_annonce_url - the_serie.cover = cover - the_serie.banner = banner - the_serie.note = note - the_serie.date = date - the_serie.serie_modified_time = serie_modified_time - the_serie.adult = is_adult - the_serie.library_name = library - - DB.session.commit() - scans.getSeries(library) - - return jsonify({"status": "success"}) - - -@app.route("/get_season_data/", methods=["GET"]) -def get_season_data(season_id): - season = Seasons.query.filter_by(season_id=season_id).first() - if season is None: - abort(404) - episodes = Episodes.query.filter_by(season_id=season_id).all() - episodes_dict = {} - for 
episode in episodes: - episodes_dict[episode.episode_number] = dict(episode.__dict__) - del episodes_dict[episode.episode_number]["_sa_instance_state"] - season = season.__dict__ - del season["_sa_instance_state"] - season["episodes"] = episodes_dict - return jsonify(season) - - -def sort_by_episode_number(episode): - return episode["episode_number"] - - -@app.route("/get_episodes/", methods=["GET"]) -def get_episodes(season_id): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - username = all_auth_tokens[token]["user"] - - user = Users.query.filter_by(name=username).first() - season = Seasons.query.filter_by(season_id=season_id).first() - serie = Series.query.filter_by(id=season.serie).first() - library = serie.library_name - library = Libraries.query.filter_by(lib_name=library).first() - - if user is None: - abort(404) - - if serie is None: - abort(404) - - if season is None: - abort(404) - - user_in_the_lib = user_in_lib(user.id, library) - if not user_in_the_lib: - abort(401) - - if serie is None or user is None: - abort(404) - - episodes = Episodes.query.filter_by(season_id=season_id).all() - episodes_list = [] - - for episode in episodes: - the_episode = dict(episode.__dict__) - del the_episode["_sa_instance_state"] - episodes_list.append(the_episode) - - episodes_list = natsort.natsorted( - episodes_list, key=itemgetter(*["episode_number"]) - ) - - data = { - "episodes": episodes_list, - "library": library.lib_name, - } - - return jsonify(data) - - -@app.route("/get_episode_data/", methods=["GET"]) -def get_episode_data(episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - if episode is None: - abort(404) - - episode = episode.__dict__ - - season = episode["season_id"] - episode_number = episode["episode_number"] - all_episodes = Episodes.query.filter_by(season_id=season).all() - all_episodes_list = [] - for episode_item in all_episodes: - all_episodes_list.append(dict(episode_item.__dict__)) - all_episodes_list = sorted(all_episodes_list, key=lambda k: k["episode_number"]) - episode_index = all_episodes_list.index( - [x for x in all_episodes_list if x["episode_number"] == episode_number][0] - ) - previous_episode, next_episode = None, None - - if episode_index != 0: - previous_episode = all_episodes_list[episode_index - 1]["episode_id"] - if episode_index != len(all_episodes_list) - 1: - next_episode = all_episodes_list[episode_index + 1]["episode_id"] - - new_episode_data = episode - - del new_episode_data["_sa_instance_state"] - new_episode_data["previous_episode"] = previous_episode - new_episode_data["next_episode"] = next_episode - - return jsonify(new_episode_data) - - -@app.route("/book_url/") -def book_url(id): - book = Books.query.filter_by(id=id).first() - if book is None: - abort(404) - book = book.__dict__ - return send_file(book["slug"], as_attachment=True) - - -@app.route("/book_url//") -def book_url_page(id, page): - book = Books.query.filter_by(id=id).first() - if book is None: - abort(404) - book = book.__dict__ - book_type = book["book_type"] - book_slug = book["slug"] - available = ["PDF", "CBZ", "CBR", "EPUB"] - if book_type in available: - if book_type == "PDF" or book_type == "EPUB": - pdf_doc = fitz.open(book_slug) - page = pdf_doc[int(page)] - image_stream = io.BytesIO(page.get_pixmap().tobytes("jpg")) - image_stream.seek(0) - return send_file(image_stream, mimetype="image/jpeg") - - elif book_type == "CBZ": - with zipfile.ZipFile(book_slug, "r") as zip: - image_file = 
zip.namelist()[int(page)] - if image_file.endswith((".jpg", ".jpeg", ".png")): - with zip.open(image_file) as image: - image_stream = io.BytesIO(image.read()) - image_stream.seek(0) - return send_file(image_stream, mimetype="image/jpeg") - - elif book_type == "CBR": - with rarfile.RarFile(book_slug, "r") as rar: - image_file = rar.infolist()[int(page)] - if image_file.filename.endswith((".jpg", ".jpeg", ".png")): - with rar.open(image_file) as image: - image_stream = io.BytesIO(image.read()) - image_stream.seek(0) - return send_file(image_stream, mimetype="image/jpeg") - - abort(404, "Book type not supported") - - -@app.route("/book_data/") -def book_data(id): - book = Books.query.filter_by(id=id).first().__dict__ - del book["_sa_instance_state"] - book_type = book["book_type"] - book_slug = book["slug"] - nb_pages = 0 - if book_type == "PDF" or book_type == "EPUB": - pdfDoc = fitz.open(book_slug) - nb_pages = pdfDoc.page_count - elif book_type == "CBZ": - with zipfile.ZipFile(book_slug, "r") as zip: - nb_pages = len(zip.namelist()) - elif book_type == "CBR": - with rarfile.RarFile(book_slug, "r") as rar: - nb_pages = len(rar.infolist()) - book["nb_pages"] = nb_pages - return jsonify(book) - - -@app.route("/download_other/") -def download_other(video_hash): - video = OthersVideos.query.filter_by(video_hash=video_hash).first() - video = video.__dict__ - del video["_sa_instance_state"] - return send_file(video["slug"], as_attachment=True) - - -@app.route("/get_all_others/") -def get_all_others(library): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - username = all_auth_tokens[token]["user"] - - the_lib = Libraries.query.filter_by(lib_name=library).first() - - if not the_lib: - abort(404) - - user = Users.query.filter_by(name=username).first() - user_in_the_lib = user_in_lib(user.id, the_lib) - if not user_in_the_lib: - return jsonify([]) - - other = OthersVideos.query.filter_by(library_name=the_lib.lib_name).all() - other_list = [video.__dict__ for video in other] - - for video in other_list: - del video["_sa_instance_state"] - - return jsonify(other_list) - - -@app.route("/get_tv//") -def get_tv(tv_name, id): - if id != "undefined": - tv = Libraries.query.filter_by(lib_name=tv_name).first() - lib_folder = tv.lib_folder - - if is_valid_url(lib_folder): - m3u = requests.get(lib_folder).text - m3u = m3u.split("\n") - else: - with open(lib_folder, "r", encoding="utf-8") as f: - m3u = f.readlines() - m3u.pop(0) - for ligne in m3u: - if not ligne.startswith(("#EXTINF", "http")): - m3u.remove(ligne) - - if int(id) >= len(m3u): - return jsonify({"channel_url": "", "channel_name": ""}) - - line = m3u[int(id)] - next_line = m3u[int(id) + 1] - the_line = line - if the_line.startswith("#EXTINF"): - the_line = next_line - - try: - channel_name = line.split(",")[-1].replace("\n", "") - except IndexError: - channel_name = f"Channel {id}" - - if int(id) - 2 >= 0: - previous_id = int(id) - 2 - else: - previous_id = None - - if int(id) + 2 < len(m3u): - next_id = int(id) + 2 - else: - next_id = None - - return jsonify( - { - "channel_url": the_line, - "channel_name": channel_name, - "previous_id": previous_id, - "next_id": next_id, - } - ) - return jsonify( - {"channel_url": "", "channel_name": "", "error": "Channel not found"} - ) - - -@app.route("/get_channels/") -def get_channels(channels): - token = request.headers.get("Authorization") - check_authorization(request, token, channels) - - channels = Libraries.query.filter_by(lib_name=channels).first() - 
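# The code below walks the playlist two lines at a time with ad-hoc regexes;
# a sketch of the same #EXTINF handling in a single pass (parse_m3u is an
# invented name; tvg-id and tvg-logo are the attribute names used here).
import re

def parse_m3u(lines):
    channels = []
    for info, url in zip(lines, lines[1:]):
        if not (info.startswith("#EXTINF") and url.startswith("http")):
            continue
        tvg_id = re.search(r'tvg-id="([^"]*)"', info)
        tvg_logo = re.search(r'tvg-logo="([^"]*)"', info)
        channels.append({
            "name": info.strip().split(",")[-1],  # display name after the last comma
            "id": tvg_id.group(1) if tvg_id else None,
            "logo": tvg_logo.group(1) if tvg_logo else "",
            "url": url.strip(),
        })
    return channels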
if not channels: - abort(404, "Library not found") - lib_folder = channels.lib_folder - - try: - with open(lib_folder, "r", encoding="utf-8") as f: - m3u = f.readlines() - except OSError: - lib_folder = lib_folder.replace("\\", "/") - m3u = requests.get(lib_folder).text - m3u = m3u.split("\n") - - m3u.pop(0) - while m3u[0] == "\n": - m3u.pop(0) - - channels = [] - for i in m3u: - if not i.startswith(("#EXTINF", "http")): - m3u.remove(i) - elif i == "\n": - m3u.remove(i) - for i in range(0, len(m3u) - 1, 2): - data = {} - try: - data["name"] = m3u[i].split(",")[-1].replace("\n", "") - work = True - except Exception: - work = False - if work: - data["url"] = m3u[i + 1].replace("\n", "") - data["channelID"] = i - tvg_id_regex = r'tvg-id="(.+?)"' - tvg_id = None - match = re.search(tvg_id_regex, m3u[i]) - if match: - tvg_id = match.group(1) - data["id"] = tvg_id - - tvg_logo_regex = r'tvg-logo="(.+?)"' - match = re.search(tvg_logo_regex, m3u[i]) - if match and match.group(1) != '" group-title=': - tvg_logo = match.group(1) - data["logo"] = tvg_logo - else: - broken_path = "" - data["logo"] = broken_path - - channels.append(data) - - channels = natsort.natsorted(channels, key=itemgetter(*["name"])) - return jsonify(channels) - - -@app.route("/search_tv//") -def search_tv(library, search): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - - library = Libraries.query.filter_by(lib_name=library).first() - if not library: - abort(404, "Library not found") - lib_folder = library.lib_folder - - try: - with open(lib_folder, "r", encoding="utf-8") as f: - m3u = f.readlines() - except OSError: - lib_folder = lib_folder.replace("\\", "/") - m3u = requests.get(lib_folder).text - m3u = m3u.split("\n") - - m3u.pop(0) - while m3u[0] == "\n": - m3u.pop(0) - - channels = [] - for i in m3u: - if not i.startswith(("#EXTINF", "http")): - m3u.remove(i) - elif i == "\n": - m3u.remove(i) - for i in range(0, len(m3u) - 1, 2): - data = {} - try: - data["name"] = m3u[i].split(",")[-1].replace("\n", "") - work = True - except Exception: - work = False - if work: - data["url"] = m3u[i + 1].replace("\n", "") - data["channelID"] = i - tvg_id_regex = r'tvg-id="(.+?)"' - tvg_id = None - match = re.search(tvg_id_regex, m3u[i]) - if match: - tvg_id = match.group(1) - data["id"] = tvg_id - - tvg_logo_regex = r'tvg-logo="(.+?)"' - match = re.search(tvg_logo_regex, m3u[i]) - if match and match.group(1) != '" group-title=': - tvg_logo = match.group(1) - data["logo"] = tvg_logo - else: - broken_path = "" - data["logo"] = broken_path - - channels.append(data) - - channels = natsort.natsorted(channels, key=itemgetter(*["name"])) - - search = search.lower() - search_terms = search.split(" ") - search_results = [] - - for channel in channels: - count = 0 - name = channel["name"].lower() - for term in search_terms: - if term in name: - count += 1 - if count > 0: - data = channel - data["count"] = count - search_results.append(data) - - search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) - - return jsonify(search_results) - - -@app.route("/search_tracks//") -def search_tracks(library, search): - tracks = Tracks.query.filter_by(library_name=library).all() - - search = search.lower() - search_terms = search.split(" ") - search_results = [] - - for track in tracks: - artist = Artists.query.filter_by(id=track.artist_id).first().name.lower() - if track.album_id: - album = Albums.query.filter_by(id=track.album_id).first().name.lower() - else: - album = "" - count = 0 - 
name = track.name.lower() - for term in search_terms: - if term in name: - count += 1 - if term in artist: - count += 1 - if term in album: - count += 1 - if count > 0: - data = track - data.count = count - data = data.__dict__ - del data["_sa_instance_state"] - search_results.append(data) - - search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) - - return jsonify(search_results) - - -@app.route("/search_albums//") -def search_albums(library, search): - albums = Albums.query.filter_by(library_name=library).all() - - search = search.lower() - search_terms = search.split(" ") - search_results = [] - - for album in albums: - artist = Artists.query.filter_by(id=album.artist_id).first().name.lower() - name = album.name.lower() - count = 0 - for term in search_terms: - if term in name: - count += 1 - if term in artist: - count += 1 - if count > 0: - data = album - data.count = count - data = data.__dict__ - del data["_sa_instance_state"] - search_results.append(data) - - search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) - - return jsonify(search_results) - - -@app.route("/search_artists//") -def search_artists(library, search): - artists = Artists.query.filter_by(library_name=library).all() - - search = search.lower() - search_terms = search.split(" ") - search_results = [] - - for artist in artists: - name = artist.name.lower() - count = 0 - for term in search_terms: - if term in name: - count += 1 - if count > 0: - data = artist - data.count = count - data = data.__dict__ - del data["_sa_instance_state"] - search_results.append(data) - - search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) - - return jsonify(search_results) - - -@app.route("/search_playlists//") -def search_playlists(library, search): - playlists = Playlists.query.filter_by(library_name=library).all() - - search = search.lower() - search_terms = search.split(" ") - search_results = [] - - for playlist in playlists: - tracks = playlist.tracks.split(",") - name = playlist.name.lower() - count = 0 - for term in search_terms: - if term in name: - count += 1 - for track in tracks: - track = Tracks.query.filter_by(id=track).first().name.lower() - if term in track: - count += 1 - if count > 0: - data = playlist - data.count = count - data = data.__dict__ - del data["_sa_instance_state"] - search_results.append(data) - - search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) - - return jsonify(search_results) - - -def is_valid_url(url): - try: - response = requests.get(url) - return response.status_code == requests.codes.ok - except requests.exceptions.RequestException: - return False - - -@app.route("/get_all_consoles/") -def get_all_consoles(library): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - generate_log(request, "SUCCESS") - consoles_data = { - "GB": {"name": "Gameboy", "image": "/static/img/Gameboy.png"}, - "GBA": {"name": "Gameboy Advance", "image": "/static/img/Gameboy Advance.png"}, - "GBC": {"name": "Gameboy Color", "image": "/static/img/Gameboy Color.png"}, - "N64": {"name": "Nintendo 64", "image": "/static/img/N64.png"}, - "NES": { - "name": "Nintendo Entertainment System", - "image": "/static/img/NES.png", - }, - "NDS": {"name": "Nintendo DS", "image": "/static/img/Nintendo DS.png"}, - "SNES": { - "name": "Super Nintendo Entertainment System", - "image": "/static/img/SNES.png", - }, - "Sega Mega Drive": { - "name": "Sega Mega Drive", - "image": "/static/img/Sega 
Mega Drive.png", - }, - "Sega Master System": { - "name": "Sega Master System", - "image": "/static/img/Sega Master System.png", - }, - "Sega Saturn": {"name": "Sega Saturn", "image": "/static/img/Sega Saturn.png"}, - "PS1": {"name": "PS1", "image": "/static/img/PS1.png"}, - } - - consoles = Games.query.filter_by(library_name=library).all() - consoles_list = [] - - for console in consoles: - data = { - "short_name": console.console, - "image": consoles_data[console.console]["image"], - "name": consoles_data[console.console]["name"], - } - if data not in consoles_list: - consoles_list.append(data) - - return jsonify(consoles_list) - - -@app.route("/get_all_games//") -def get_all_games(lib, console_name): - token = request.headers.get("Authorization") - check_authorization(request, token, lib) - generate_log(request, "SUCCESS") - - games = Games.query.filter_by(console=console_name, library_name=lib).all() - - if not games: - return jsonify([]) - - games_list = [game.__dict__ for game in games] - for game in games_list: - del game["_sa_instance_state"] - return jsonify(games_list) - - -@app.route("/game_data//") -def game_data(lib, game_id): - game_id = Games.query.filter_by(id=game_id, library_name=lib).first() - if not game_id: - abort(404) - game_id = game_id.__dict__ - del game_id["_sa_instance_state"] - - return jsonify(game_id) - - -@app.route("/game_file//") -def game_file(lib, id): - if id is not None: - game = Games.query.filter_by(id=id, library_name=lib).first() - game = game.__dict__ - slug = game["slug"] - return send_file(slug, as_attachment=True) - - -@app.route("/bios/") -def bios(console): - if console is not None: - if not os.path.exists(f"{dir_path}/static/bios/{console}"): - abort(404) - bios = [ - i - for i in os.listdir(f"{dir_path}/static/bios/{console}") - if i.endswith(".bin") - ] - bios = f"{dir_path}/static/bios/{console}/{bios[0]}" - - if not os.path.exists(bios): - abort(404) - - return send_file(bios, as_attachment=True) - - -@app.route("/search_movies//") -def search_movies(library, search): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - - username = all_auth_tokens[token]["user"] - user_type = Users.query.filter_by(name=username).first() - - search = unidecode(search.replace("%20", " ").lower()) - search_terms = search.split() - - search = search.replace("%20", " ").lower() - search_terms = search.split() - - for term in search_terms: - if len(term) <= 3: - search_terms.remove(term) - - movies = Movies.query.filter_by(library_name=library).all() - results = {} - for movie in movies: - count = 0 - title = movie.title.lower() - real_title = movie.real_title.lower() - slug = movie.slug.lower() - description = movie.description.lower().split(" ") - casts = movie.cast.split(",") - cast_list = [] - for cast in casts: - cast_list.append(cast.name.lower()) - - cast = " ".join(cast_list) - date = str(movie.date).lower() - genre = movie.genre.lower() - alternatives_names = movie.alternatives_names.lower() - value_used = [title, real_title, slug, cast, date, genre, alternatives_names] - value_points = [2, 4, 3, 1, 0.5, 0.5, 1.5] - for term in search_terms: - for value in value_used: - index = value_used.index(value) - if term.lower() in value: - count += value_points[index] - for word in description: - if term == word.lower(): - count += 0.1 - if count > 0: - results[movie] = count - - results = sorted(results.items(), key=lambda x: x[1], reverse=True) - - movies = [i[0].__dict__ for i in results] - for i in movies: - del 
i["_sa_instance_state"] - - user_type = user_type.account_type - - if user_type in ["Kid", "Teen"]: - for movie in movies: - if movie["adult"] == "True": - movies.remove(movie) - return jsonify(movies) - - -@app.route("/search_series//") -def search_series(library, search): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - - username = all_auth_tokens[token]["user"] - - series = Series.query.filter_by(library_name=library).all() - user = Users.query.filter_by(name=username).first() - library = Libraries.query.filter_by(lib_name=library).first() - - search = unidecode(search.replace("%20", " ").lower()) - search_terms = search.split() - - results = [] - - for serie_dict in series: - count = 0 - name = unidecode(serie_dict.name.lower()) - original_name = unidecode(serie_dict.original_name.lower()) - description = unidecode(serie_dict.description.lower()) - cast = unidecode(serie_dict.cast.lower()) - date = unidecode(str(serie_dict.date).lower()) - genre = unidecode(serie_dict.genre.lower()) - - value_used = [name, original_name, description, cast, date, genre] - - for term in search_terms: - for value in value_used: - if term in value: - count += 1 - for word in description: - if term == word.lower(): - count += 1 - if count > 0: - serie_dict = serie_dict.__dict__ - serie_dict["count"] = count - del serie_dict["_sa_instance_state"] - results.append(serie_dict) - - results = sorted(results, key=lambda x: x["count"], reverse=True) - - user_type = user.account_type - - if user_type in ["Kid", "Teen"]: - for serie_dict in results: - if serie_dict["adult"] == "True": - results.remove(serie_dict) - - return jsonify(results) - - -@app.route("/search_books//") -def search_books(library, search): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - - books = Books.query.filter_by(library_name=library).all() - library = Libraries.query.filter_by(lib_name=library).first() - - search = unidecode(search.replace("%20", " ").lower()) - search_terms = search.split() - - results = [] - - for book in books: - count = 0 - title = unidecode(book.title.lower()) - slug = unidecode(book.slug.lower()) - book_type = unidecode(book.book_type.lower()) - cover = unidecode(book.cover.lower()) - - value_used = [title, slug, book_type, cover] - - for term in search_terms: - for value in value_used: - if term in value: - count += 1 - if count > 0: - results.append(book) - - books = [i.__dict__ for i in results] - for book in books: - del book["_sa_instance_state"] - - books = natsort.natsorted(books, key=itemgetter(*["title"])) - return jsonify(books) - - -@app.route("/search_others//") -def search_others(library, search): - token = request.headers.get("Authorization") - check_authorization(request, token, library) - - username = all_auth_tokens[token]["user"] - - search = search.replace("%20", " ").lower() - search_terms = search.split() - - others = OthersVideos.query.filter_by(library_name=library).all() - results = {} - for other in others: - count = 0 - video_hash = other.video_hash.lower() - title = other.title.lower() - slug = other.slug.lower() - - value_used = [title, slug, video_hash] - for term in search_terms: - for value in value_used: - if term in value: - count += 1 - if count > 0: - results[other] = count - - results = sorted(results.items(), key=lambda x: x[1], reverse=True) - - others = [i[0].__dict__ for i in results] - for i in others: - del i["_sa_instance_state"] - - user = 
Users.query.filter_by(name=username).first() - user_type = user.account_type - - if user_type in ["Kid", "Teen"]: - for other in others: - if other["adult"] == "True": - others.remove(other) - return jsonify(others) - - -@app.route("/set_vues_time_code/", methods=["POST"]) -def set_vues_time_code(): - time_code = request.get_json() - movie_id = time_code["movie_id"] - time_code = time_code["time_code"] - username = time_code["username"] - movie = Movies.query.filter_by(id=movie_id).first() - if movie is None: - abort(404) - - actual_vues = movie.vues - p = re.compile("(?") -def main_movie(movie_id): - movie_id = movie_id.replace(".m3u8", "") - movie = Movies.query.filter_by(id=movie_id).first() - video_path = movie.slug - video_properties = get_video_properties(video_path) - height = int(video_properties["height"]) - width = int(video_properties["width"]) - m3u8_file = "#EXTM3U\n\n" - - m3u8_file += generate_caption_movie(movie_id) - qualities = [144, 240, 360, 480, 720, 1080] - file = [] - for quality in qualities: - if quality < height: - new_width = int(quality) - new_height = int(float(width) / float(height) * new_width) - new_height += new_height % 2 - m3u8_line = f"#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH={new_width*new_height},CODECS=\"avc1.4d4033,mp4a.40.2\",AUDIO=\"audio\",RESOLUTION={new_height}x{new_width}\n/video_movie/{quality}/{movie_id}.m3u8\n" - file.append(m3u8_line) - last_line = f"#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH={width*height},CODECS=\"avc1.4d4033,mp4a.40.2\",AUDIO=\"audio\",RESOLUTION={width}x{height}\n/video_movie/{movie_id}.m3u8\n\n\n" - file.append(last_line) - file = "".join(file) - m3u8_file += file - response = make_response(m3u8_file) - - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" - ) - return response - - -@app.route("/can_i_play_movie/") -def can_i_play_movie(movie_id): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - return jsonify({"can_I_play": False}) - else: - user = all_auth_tokens[token]["user"] - movie = Movies.query.filter_by(id=movie_id).first() - if movie is None: - abort(404) - - lib = movie.library_name - the_lib = Libraries.query.filter_by(lib_name=lib).first() - - if the_lib is None: - abort(404) - - if the_lib.available_for is not None: - if user not in the_lib.available_for: - return jsonify({"can_I_play": False}) - return jsonify({"can_I_play": True}) - - -@app.route("/can_i_play_episode/") -def can_i_play_episode(episode_id): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - return jsonify({"can_I_play": False}) - else: - user = all_auth_tokens[token]["user"] - - users = Users.query.filter_by(name=user).first() - - episode = Episodes.query.filter_by(episode_id=episode_id).first() - season = Seasons.query.filter_by(season_id=episode.season_id).first() - serie = Series.query.filter_by(id=season.serie).first() - - latest_episode_of_serie_exist = ( - LatestEpisodeWatched.query.filter_by( - serie_id=serie.id, user_id=users.id - ).first() - is not None - ) - - if latest_episode_of_serie_exist: - latest_episode_of_serie = LatestEpisodeWatched.query.filter_by( - serie_id=serie.id, user_id=users.id - ).first() - latest_episode_of_serie.episode_id = episode_id - DB.session.commit() - else: - latest_episode_of_serie = 
LatestEpisodeWatched(
-            serie_id=serie.id, user_id=users.id, episode_id=episode_id
-        )
-        DB.session.add(latest_episode_of_serie)
-        DB.session.commit()
-
-        if episode is None:
-            abort(404)
-
-        lib = serie.library_name
-        the_lib = Libraries.query.filter_by(lib_name=lib).first()
-
-        if the_lib is None:
-            abort(404)
-
-        if the_lib.available_for is not None:
-            if user not in the_lib.available_for:
-                return jsonify({"can_I_play": False})
-        return jsonify({"can_I_play": True})
-
-
-@app.route("/can_i_play_other_video/<video_hash>")
-def can_i_play_other_video(video_hash):
-    token = request.headers.get("Authorization")
-    if token not in all_auth_tokens:
-        return jsonify({"can_I_play": False})
-    else:
-        user = all_auth_tokens[token]["user"]
-        video = OthersVideos.query.filter_by(video_hash=video_hash).first()
-        if video is None:
-            return jsonify({"can_I_play": False})
-
-        lib = video.library_name
-        the_lib = Libraries.query.filter_by(lib_name=lib).first()
-
-        if the_lib is None:
-            return jsonify({"can_I_play": False})
-
-        if the_lib.available_for is not None:
-            available_for = the_lib.available_for.split(",")
-            if user not in available_for:
-                return jsonify({"can_I_play": False})
-        return jsonify({"can_I_play": True})
-
-
-@app.route("/main_serie/<episode_id>")
-def main_serie(episode_id):
-    episode = Episodes.query.filter_by(episode_id=episode_id).first()
-    episode_path = episode.slug
-
-    video_properties = get_video_properties(episode_path)
-    height = int(video_properties["height"])
-    width = int(video_properties["width"])
-    m3u8_file = "#EXTM3U\n\n"
-    # m3u8_file += generate_caption_serie(episode_id)
-    file = []
-    qualities = [144, 240, 360, 480, 720, 1080]
-    for quality in qualities:
-        if quality < height:
-            new_width = int(quality)
-            new_height = int(float(width) / float(height) * new_width)
-            if (new_height % 2) != 0:
-                new_height += 1
-            # Approximate BANDWIDTH by the full pixel count of the variant.
-            m3u8_line = f"#EXT-X-STREAM-INF:BANDWIDTH={new_width*new_height},RESOLUTION={new_height}x{new_width}\n/video_serie/{quality}/{episode_id}\n"
-            file.append(m3u8_line)
-    last_line = f"#EXT-X-STREAM-INF:BANDWIDTH={width*height},RESOLUTION={width}x{height}\n/video_serie/{episode_id}\n"
-    file.append(last_line)
-    file = file[::-1]
-    file = "".join(file)
-    m3u8_file += file
-
-    response = make_response(m3u8_file)
-
-    response.headers.set("Content-Type", "application/x-mpegURL")
-    response.headers.set("Range", "bytes=0-4095")
-    response.headers.set("Accept-Encoding", "*")
-    response.headers.set("Access-Control-Allow-Origin", "*")
-    response.headers.set(
-        "Content-Disposition", "attachment", filename=f"{episode_id}.m3u8"
-    )
-    return response
-
-
-@app.route("/main_other/<other_hash>")
-def main_other(other_hash):
-    movie = OthersVideos.query.filter_by(video_hash=other_hash).first()
-    video_path = movie.slug
-    video_properties = get_video_properties(video_path)
-    height = int(video_properties["height"])
-    width = int(video_properties["width"])
-    m3u8_file = "#EXTM3U\n\n"
-    qualities = [144, 240, 360, 480, 720, 1080]
-    file = []
-    for quality in qualities:
-        if quality < height:
-            new_width = int(quality)
-            new_height = int(float(width) / float(height) * new_width)
-            if (new_height % 2) != 0:
-                new_height += 1
-            m3u8_line = f"#EXT-X-STREAM-INF:BANDWIDTH={new_width*new_height},RESOLUTION={new_height}x{new_width}\n/video_other/{quality}/{other_hash}\n"
-            file.append(m3u8_line)
-    last_line = f"#EXT-X-STREAM-INF:BANDWIDTH={width*height},RESOLUTION={width}x{height}\n/video_other/{other_hash}\n"
-    file.append(last_line)
-    file = file[::-1]
-    file = "".join(file)
-    m3u8_file += file
-    response =
make_response(m3u8_file) - - response.headers.set("Content-Type", "application/x-mpegURL") - response.headers.set("Range", "bytes=0-4095") - response.headers.set("Accept-Encoding", "*") - response.headers.set("Access-Control-Allow-Origin", "*") - response.headers.set( - "Content-Disposition", "attachment", filename=f"{other_hash}.m3u8" - ) - return response - - -def generate_caption_serie(episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - slug = episode.slug - caption_command = [ - "ffprobe", - "-loglevel", - "error", - "-select_streams", - "s", - "-show_entries", - "stream=index:stream_tags=language", - "-of", - "csv=p=0", - slug, - ] - caption_pipe = subprocess.Popen(caption_command, stdout=subprocess.PIPE) - caption_response = caption_pipe.stdout.read().decode("utf-8") - caption_response = caption_response.split("\n") - - all_captions = [] - - caption_response.pop() - - for line in caption_response: - line = line.rstrip() - language = line.split(",")[1] - new_language = pycountry.languages.get(alpha_2=language) - index = line.split(",")[0] - try: - title_name = line.split(",")[2] - - try: - title_name = title_name.split(" : ")[0] - subtitle_type = title_name.split(" : ")[1] - except Exception: - title_name = title_name - subtitle_type = "Unknown" - - except Exception: - title_name = new_language - subtitle_type = "Unknown" - if subtitle_type.lower() != "pgs": - all_captions.append( - { - "index": index, - "languageCode": language, - "language": new_language, - "url": f"/chunk_caption_serie/{language}/{index}/{episode_id}.vtt", - "name": title_name, - } - ) - return all_captions - - -def generate_caption_movie(movie_id): - movie_path = Movies.query.filter_by(id=movie_id).first() - slug = movie_path.slug - - caption_command = [ - "ffprobe", - "-loglevel", - "error", - "-select_streams", - "s", - "-show_entries", - "stream=index,codec_name:stream_tags=language,title,handler_name,codec_name", - "-of", - "csv=p=0", - slug, - ] - - caption_pipe = subprocess.Popen(caption_command, stdout=subprocess.PIPE) - caption_response = caption_pipe.stdout.read().decode("utf-8") - caption_response = caption_response.split("\n") - caption_response.pop() - - all_captions = [] - for line in caption_response: - line = line.rstrip() - index = line.split(",")[0] - type = line.split(",")[1] - language = line.split(",")[2] - try: - title_name = line.split(",")[3] - except Exception: - title_name = language - - if type != "subrip": - continue - - all_captions.append( - { - "index": index, - "languageCode": language, - "language": title_name, - "url": f"/captionMovie/{movie_id}_{index}.m3u8", - "name": title_name, - } - ) - string = "" - - for caption in all_captions: - string += f'#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="{caption["language"]}",DEFAULT=NO,FORCED=NO,URI="{caption["url"]}",LANGUAGE="{caption["languageCode"]}"\n' - - return string - - -@app.route("/get_actor_data/", methods=["GET", "POST"]) -def get_actor_data(actor_id): - if actor_id == "undefined": - abort(404) - movies_data = [] - series_data = [] - actor = Actors.query.filter_by(actor_id=actor_id).first() - movies = actor.actor_programs.split(" ") - for movie in movies: - in_movies = Movies.query.filter_by(id=movie).first() is not None - in_series = Series.query.filter_by(id=movie).first() is not None - if in_movies: - this_movie = Movies.query.filter_by(id=movie).first().__dict__ - del this_movie["_sa_instance_state"] - if this_movie not in movies_data: - movies_data.append(this_movie) - elif in_series: - 
this_series = Series.query.filter_by(id=movie).first().__dict__ - del this_series["_sa_instance_state"] - if this_series not in series_data: - series_data.append(this_series) - - actor_data = { - "actor_name": actor.name, - "actor_image": f"/actor_image/{actor_id}", - "actor_description": actor.actor_description, - "actor_birthday": actor.actor_birth_date, - "actor_birthplace": actor.actor_birth_place, - "actor_movies": movies_data, - "actor_series": series_data, - } - return jsonify(actor_data) - - -@app.route("/get_this_episode_data/", methods=["GET", "POST"]) -def get_this_episode_data(episode_id): - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_data = { - "episode_name": episode.episode_name, - "intro_start": episode.intro_start, - "intro_end": episode.intro_end, - } - return jsonify(episode_data, default=lambda o: o.__dict__) - - -@app.route("/is_chocolate", methods=["GET", "POST"]) -def is_chocolate(): - return jsonify({"is_chocolate": True}) - - -@app.route("/download_movie/") -def download_movie(movie_id): - can_download = config["ChocolateSettings"]["allowDownload"].lower() == "true" - if not can_download: - return jsonify({"error": "download not allowed"}) - movie = Movies.query.filter_by(id=movie_id).first() - movie_path = movie.slug - movie_library = movie.library_name - library = Libraries.query.filter_by(lib_name=movie_library).first() - library_path = library.lib_folder - movie_path = f"{library_path}/{movie_path}" - return send_file(movie_path, as_attachment=True) - - -@app.route("/download_episode/") -def download_episode(episode_id): - can_download = config["ChocolateSettings"]["allowDownload"].lower() == "true" - if not can_download: - return jsonify({"error": "download not allowed"}) - episode = Episodes.query.filter_by(episode_id=episode_id).first() - episode_path = episode.slug - return send_file(episode_path, as_attachment=True) - - -@app.route("/movie_cover/") -def movie_cover(id): - movie = Movies.query.filter_by(id=id).first() - movie_cover = movie.cover - return send_file(movie_cover, as_attachment=True) - - -@app.route("/movie_banner/") -def movie_banner(id): - movie = Movies.query.filter_by(id=id).first() - movie_banner = movie.banner - return send_file(movie_banner, as_attachment=True) - - -@app.route("/serie_cover/") -def serie_cover(id): - serie = Series.query.filter_by(id=id).first() - serie_cover = serie.cover - return send_file(serie_cover, as_attachment=True) - - -@app.route("/serie_banner/") -def serie_banner(id): - serie = Series.query.filter_by(id=id).first() - serie_banner = serie.banner - return send_file(serie_banner, as_attachment=True) - - -@app.route("/season_cover/") -def season_cover(id): - season = Seasons.query.filter_by(season_id=id).first() - season_cover = season.cover - return send_file(season_cover, as_attachment=True) - - -@app.route("/episode_cover/") -def episode_cover(id): - episode = Episodes.query.filter_by(episode_id=id).first() - episode_cover = episode.episode_cover_path - if "https://" in episode_cover: - response = requests.get(episode_cover) - img = Image.open(io.BytesIO(response.content)) - season_id = episode.season_id - img.save(f"{IMAGES_PATH}/{season_id}_{id}_Cover.webp", "webp") - episode_cover = f"{IMAGES_PATH}/{season_id}_{id}_Cover.webp" - episode.episode_cover_path = episode_cover - DB.session.commit() - - return send_file(episode_cover, as_attachment=True) - - -@app.route("/other_cover/") -def other_cover(id): - other = OthersVideos.query.filter_by(video_hash=id).first() - other_cover 
= other.banner - return send_file(other_cover, as_attachment=True) - - -@app.route("/book_cover/") -def book_cover(id): - book = Books.query.filter_by(id=id).first() - book_cover = book.cover - return send_file(book_cover, as_attachment=True) - - -@app.route("/actor_image/") -def actor_image(id): - actor = Actors.query.filter_by(actor_id=id).first() - actor_image = actor.actor_image - if not actor or not os.path.exists(actor_image): - ext_to_ext = { - ".png": ".webp", - ".webp": ".png", - } - name, extension = os.path.splitext(actor_image) - new_extension = ext_to_ext[extension] - actor_image = f"{name}{new_extension}" - if not os.path.exists(actor_image): - actor.actor_image = ( - f"{dir_path}/static/img/avatars/defaultUserProfilePic.png" - ) - DB.session.commit() - return send_file( - f"{dir_path}/static/img/avatars/defaultUserProfilePic.png", - as_attachment=True, - ) - else: - actor.actor_image = actor_image - DB.session.commit() - return send_file(actor_image, as_attachment=True) - - -@app.route("/artist_image/") -def artist_image(id): - artist = Artists.query.filter_by(id=id).first() - artist_image = artist.cover - return send_file(artist_image, as_attachment=True) - - -@app.route("/album_cover/") -def album_cover(id): - album = Albums.query.filter_by(id=id).first() - album_cover = album.cover - return send_file(album_cover, as_attachment=True) - - -@app.route("/playlist_cover/") -def playlist_cover(id): - if id != "0": - playlist = Playlists.query.filter_by(id=id).first() - playlist_cover = playlist.cover - else: - playlist_cover = f"{dir_path}/static/img/likes.webp" - return send_file(playlist_cover, as_attachment=True) - - -@app.route("/track_cover/") -def track_cover(id): - track = Tracks.query.filter_by(id=id).first() - track_cover = track.cover - return send_file(track_cover, as_attachment=True) - - -@app.route("/user_image/") -def user_image(id): - user = Users.query.filter_by(id=id).first() - user_image = user.profil_picture - - if not user or not os.path.exists(user_image): - return send_file( - f"{dir_path}/static/img/avatars/defaultUserProfilePic.png", - as_attachment=True, - ) - - return send_file(user_image, as_attachment=True) - - -if __name__ == "__main__": - enabled_rpc = config["ChocolateSettings"]["discordrpc"] - if enabled_rpc == "true": - try: - RPC.update( - state="Loading Chocolate...", - details=f"The Universal MediaManager | ({last_commit_hash})", - large_image="loader", - large_text="Chocolate", - buttons=[ - { - "label": "Github", - "url": "https://github.com/ChocolateApp/Chocolate", - } - ], - start=start_time, - ) - except Exception: - pass - - with app.app_context(): - if not ARGUMENTS.no_scans: - libraries = Libraries.query.all() - libraries = [library.__dict__ for library in libraries] - - libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_name"])) - libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_type"])) - - type_to_call = { - "series": scans.getSeries, - "movies": scans.getMovies, - "consoles": scans.getGames, - "others": scans.getOthersVideos, - "books": scans.getBooks, - "musics": scans.getMusics, - } - - for library in libraries: - type_to_call[library["lib_type"]](library["lib_name"]) - - print() - print("\033[?25h", end="") - - enabled_rpc = config["ChocolateSettings"]["discordrpc"] - if enabled_rpc == "true": - try: - RPC.update( - state="Idling", - details=f"The Universal MediaManager | ({last_commit_hash})", - large_image="largeimage", - large_text="Chocolate", - buttons=[ - { - "label": "Github", - "url": 
"https://github.com/ChocolateApp/Chocolate", - } - ], - start=time(), - ) - except Exception: - pass - - app.run(host="0.0.0.0", port="8888") +import datetime +import io +import json +import os +import platform +import re +import subprocess +import warnings +import zipfile +import rarfile +import fitz +import logging +import git +import GPUtil +import pycountry +import requests +import sqlalchemy +import natsort + +from time import localtime, mktime, time +from uuid import uuid4 +from deep_translator import GoogleTranslator +from flask import ( + abort, + jsonify, + make_response, + request, + send_file, + render_template, +) +from guessit import guessit +from PIL import Image +from pypresence import Presence +from tmdbv3api import TV, Movie, Person, TMDb, Search +from tmdbv3api.as_obj import AsObj +from unidecode import unidecode +from videoprops import get_video_properties +from operator import itemgetter + +from . import ( + create_app, + get_dir_path, + DB, + LOGIN_MANAGER, + tmdb, + config, + all_auth_tokens, + ARGUMENTS, + IMAGES_PATH, + write_config, +) +from .tables import Language, Movies, Series, Seasons, Episodes, OthersVideos, Users, Libraries, Books, Artists, MusicLiked, MusicPlayed, Playlists, Tracks, Albums, Actors, Games, LatestEpisodeWatched, LibrariesMerge +from . import scans +from .utils.utils import generate_log, check_authorization, user_in_lib + +app = create_app() +dir_path = get_dir_path() + +with app.app_context(): + DB.create_all() + +log = logging.getLogger("werkzeug") +log.setLevel(logging.DEBUG) + +start_time = mktime(localtime()) + +with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=sqlalchemy.exc.SAWarning) + +langs_dict = GoogleTranslator().get_supported_languages(as_dict=True) + + +@LOGIN_MANAGER.user_loader +def load_user(id): + return Users.query.get(int(id)) + + +try: + repo = git.Repo(search_parent_directories=True) + last_commit_hash = repo.head.object.hexsha[:7] +except Exception: + last_commit_hash = "xxxxxxx" + + +def translate(string): + language = config["ChocolateSettings"]["language"] + if language == "EN": + return string + translated = GoogleTranslator(source="english", target=language.lower()).translate( + string + ) + return translated + + +tmdb.language = config["ChocolateSettings"]["language"].lower() +tmdb.debug = True + +movie = Movie() +show = TV() + +error_message = True +client_id = "771837466020937728" + +enabled_rpc = config["ChocolateSettings"]["discordrpc"] +if enabled_rpc == "true": + try: + RPC = Presence(client_id) + RPC.connect() + except Exception: + enabled_rpc == "false" + config.set("ChocolateSettings", "discordrpc", "false") + write_config(config) + +searched_films = [] +all_movies_not_sorted = [] +searched_series = [] +simple_data_series = {} + +config_language = config["ChocolateSettings"]["language"] +with app.app_context(): + language_db = DB.session.query(Language).first() + exists = DB.session.query(Language).first() is not None + if not exists: + new_language = Language(language="EN") + DB.session.add(new_language) + DB.session.commit() + language_db = DB.session.query(Language).first() + if language_db.language != config_language: + DB.session.query(Movies).delete() + DB.session.query(Series).delete() + DB.session.query(Seasons).delete() + DB.session.query(Episodes).delete() + language_db.language = config_language + DB.session.commit() + +CHUNK_LENGTH = 5 +CHUNK_LENGTH = int(CHUNK_LENGTH) + +movies_genre = [] +movie_extension = "" +websites_trailers = { + "YouTube": 
"https://www.youtube.com/embed/", + "Dailymotion": "https://www.dailymotion.com/video_movie/", + "Vimeo": "https://vimeo.com/", +} + + +@app.after_request +def after_request(response): + code_to_status = { + 100: "Keep the change, ya filthy animal", + 101: "I feel the need... the need for speed.", + 102: "There's a storm coming, Mr. Wayne.", + 103: "I'll be back.", + 200: "Everything is awesome!", + 201: "It's alive! It's alive!", + 202: "Challenge accepted!", + 203: "Non - Authoritative Information", + 204: "Nothing to see here.", + 205: "I feel the power of the reset.", + 206: "I've got a bad feeling about this... but only a part of it.", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "I'm going on an adventure!", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "(Unused)", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "You shall not pass", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Payload Too Large", + 414: "URI Too Long", + 415: "Unsupported Media Type", + 416: "Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 421: "Misdirected Request", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 425: "Too Early", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 451: "Unavailable For Legal Reasons", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 510: "Not Extended", + 511: "Network Authentication Required", + } + + if response.status_code in code_to_status: + generate_log( + request, f"{response.status_code} - {code_to_status[response.status_code]}" + ) + else: + generate_log(request, f"{response.status_code} - Unknown status code") + + return response + + +@app.route("/") +@app.route("/") +def index(path=None): + return render_template("index.html") + + +@app.route("/check_login", methods=["POST"]) +def check_login(): + global all_auth_tokens + token = request.get_json()["token"] + if not token: + generate_log(request, "ERROR") + return jsonify({"status": "error"}) + + token = "Bearer " + token + + if token not in all_auth_tokens.keys(): + generate_log(request, "ERROR") + return jsonify({"status": "error"}) + + user = Users.query.filter_by(name=all_auth_tokens[token]["user"]).first() + return jsonify( + { + "status": "ok", + "username": all_auth_tokens[token]["user"], + "account_type": user.account_type, + "account_id": user.id, + } + ) + + +@app.route("/check_download") +def check_download(): + if config["ChocolateSettings"]["allowdownload"] == "true": + return jsonify(True) + return jsonify(False) + + +def length_video(path: str) -> float: + seconds = subprocess.run( + [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + path, + ], + stdout=subprocess.PIPE, + text=True, + ) + return float(seconds.stdout) or 0 + + +def get_gpu_info() -> str: + if platform.system() == 
"Windows": + return gpuname() + elif platform.system() == "Darwin": + return subprocess.check_output( + ["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"] + ).strip() + elif platform.system() == "Linux": + return subprocess.check_output( + ["lshw", "-C", "display", "-short"] + ).decode("utf-8") + return "" + + +def gpuname() -> str: + """Returns the model name of the first available GPU""" + try: + gpus = GPUtil.getGPUs() + except Exception: + print( + "Unable to detect GPU model." + ) + return "UNKNOWN" + if len(gpus) == 0: + raise ValueError("No GPUs detected in the system") + return gpus[0].name + +def get_gpu_brand(): + gpu = get_gpu_info().lower() + nvidia_possibilities = ["nvidia", "gtx", "rtx", "geforce"] + amd_possibilities = ["amd", "radeon", "rx", "vega"] + intel_possibilities = ["intel", "hd graphics", "iris", "uhd"] + mac_possibilities = ["apple", "mac", "m1", "m2"] + if any(x in gpu for x in nvidia_possibilities): + return "NVIDIA" + elif any(x in gpu for x in amd_possibilities): + return "AMD" + elif any(x in gpu for x in intel_possibilities): + return "Intel" + elif any(x in gpu for x in mac_possibilities): + return "Apple" + else: + return "UNKNOWN" + + + +@app.route("/language_file") +def language_file(): + language = config["ChocolateSettings"]["language"] + + if ( + not os.path.isfile(f"{dir_path}/static/lang/{language.lower()}.json") + or "{}" + in open( + f"{dir_path}/static/lang/{language.lower()}.json", "r", encoding="utf-8" + ).read() + ): + language = "EN" + + with open( + f"{dir_path}/static/lang/{language.lower()}.json", "r", encoding="utf-8" + ) as f: + language = json.load(f) + + with open(f"{dir_path}/static/lang/en.json", "r", encoding="utf-8") as f: + en = json.load(f) + + for key in en: + if key not in language: + language[key] = en[key] + + return jsonify(language) + + +@app.route("/video_movie/.m3u8", methods=["GET"]) +def create_m3u8(movie_id): + movie = Movies.query.filter_by(id=movie_id).first() + if not movie: + abort(404) + video_path = movie.slug + duration = length_video(video_path) + + file = f"""#EXTM3U +#EXT-X-MEDIA-SEQUENCE:0 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH}\n\n""" + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f"#EXTINF:{int(CHUNK_LENGTH)},\n/chunk_movie/{movie_id}-{(i // CHUNK_LENGTH) + 1}.ts\n" # noqa + + file += "#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" + ) + + return response + + +@app.route("/video_movie//.m3u8", methods=["GET"]) +def create_m3u8_quality(quality, movie_id): + movie = Movies.query.filter_by(id=movie_id).first() + video_path = movie.slug + duration = length_video(video_path) + file = f"""#EXTM3U +#EXT-X-MEDIA-SEQUENCE:0 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH}\n""" + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f"#EXTINF:{int(CHUNK_LENGTH)},\n/chunk_movie/{quality}/{movie_id}-{(i // CHUNK_LENGTH) + 1}.ts\n" + + file += "#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" + 
) + + return response + + +@app.route("/video_other/", methods=["GET"]) +def create_other_m3u8(hash): + other = OthersVideos.query.filter_by(video_hash=hash).first() + video_path = other.slug + duration = length_video(video_path) + file = f""" +#EXTM3U + +#EXT-X-VERSION:4 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH} +#EXT-X-MEDIA-SEQUENCE:1 + """ + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f""" +#EXTINF:{float(CHUNK_LENGTH)}, +/chunk_other/{hash}-{(i // CHUNK_LENGTH) + 1}.ts + """ + + file += "\n#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set("Content-Disposition", "attachment", filename=f"{hash}.m3u8") + + return response + + +@app.route("/video_other//", methods=["GET"]) +def create_other_m3u8_quality(quality, hash): + other = OthersVideos.query.filter_by(video_hash=hash).first() + video_path = other.slug + duration = length_video(video_path) + file = f""" +#EXTM3U + +#EXT-X-VERSION:4 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH} +#EXT-X-MEDIA-SEQUENCE:1 + """ + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f""" +#EXTINF:{float(CHUNK_LENGTH)}, +/chunk_other/{quality}/{hash}-{(i // CHUNK_LENGTH) + 1}.ts + """ + + file += "\n#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set("Content-Disposition", "attachment", filename=f"{hash}.m3u8") + + return response + + +@app.route("/video_serie/", methods=["GET"]) +def create_serie_m3u8(episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + episode_path = episode.slug + duration = length_video(episode_path) + file = f""" +#EXTM3U + +#EXT-X-VERSION:4 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH} +#EXT-X-MEDIA-SEQUENCE:1 + """ + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f""" +#EXTINF:{float(CHUNK_LENGTH)}, +/chunk_serie/{episode_id}-{(i // CHUNK_LENGTH) + 1}.ts + """ + + file += "\n#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set("Content-Disposition", "attachment", filename=f"{episode_id}") + + return response + + +@app.route("/video_serie//", methods=["GET"]) +def create_serie_m3u8_quality(quality, episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + episode_path = episode.slug + duration = length_video(episode_path) + file = f""" +#EXTM3U + +#EXT-X-VERSION:4 +#EXT-X-TARGETDURATION:{CHUNK_LENGTH} +#EXT-X-MEDIA-SEQUENCE:1 + """ + + for i in range(0, int(duration), CHUNK_LENGTH): + file += f""" +#EXTINF:{float(CHUNK_LENGTH)}, +/chunk_serie/{quality}/{episode_id}-{(i // CHUNK_LENGTH) + 1}.ts + """ + + file += "\n#EXT-X-ENDLIST" + + response = make_response(file) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set("Content-Disposition", "attachment", 
filename=f"{episode_id}") + + return response + + +@app.route("/chunk_serie/-.ts", methods=["GET"]) +def get_chunk_serie(episode_id, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + episode = Episodes.query.filter_by(episode_id=episode_id).first() + episode_path = episode.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + log_level_value = "error" + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-i", + episode_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-c:a", + "aac", + "-b:a", + "196k", + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{episode_id}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_serie//-.ts", methods=["GET"]) +def get_chunk_serie_quality(quality, episode_id, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + episode = Episodes.query.filter_by(episode_id=episode_id).first() + episode_path = episode.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + video_properties = get_video_properties(episode_path) + width = video_properties["width"] + height = video_properties["height"] + new_width = int(float(quality)) + new_height = round(float(width) / float(height) * new_width) + if (new_height % 2) != 0: + new_height += 1 + log_level_value = "error" + + bitrate = { + "1080": "192k", + "720": "192k", + "480": "128k", + "360": "128k", + "240": "96k", + "144": "64k", + } + + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-i", + episode_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-vf", + f"scale={new_height}:{new_width}", + "-c:a", + "aac", + "-b:a", + bitrate[quality], + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{episode_id}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_movie/-.ts", methods=["GET"]) +def chunk_movie(movie_id, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + movie = Movies.query.filter_by(id=movie_id).first() + video_path = movie.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + log_level_value = "error" + + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-i", + video_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-c:a", + "aac", + "-b:a", + "196k", + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + 
response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_movie//-.ts", methods=["GET"]) +def get_chunk_quality(quality, movie_id, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + + movie = Movies.query.filter_by(id=movie_id).first() + video_path = movie.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + video_properties = get_video_properties(video_path) + width = video_properties["width"] + height = video_properties["height"] + new_width = int(float(quality)) + new_height = round(float(width) / float(height) * new_width) + while (new_height % 8) != 0: + new_height += 1 + + while (new_width % 8) != 0: + new_width += 1 + + a_bitrate = { + "1080": "192k", + "720": "192k", + "480": "128k", + "360": "128k", + "240": "96k", + "144": "64k", + } + + a_bitrate = ((int(quality) - 144) / (1080 - 144)) * (192 - 64) + 64 + + v_bitrate = ((int(quality) - 144) / (1080 - 144)) * (5000 - 1500) + 1500 + + if v_bitrate < 1500: + v_bitrate = 1500 + + log_level_value = "error" + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-hwaccel", + "auto", + "-i", + video_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-vf", + f"scale={new_height}:{new_width}", + "-c:a", + "aac", + "-b:a", + f"{a_bitrate}k", + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_other/-.ts", methods=["GET"]) +def get_chunk_other(hash, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + movie = OthersVideos.query.filter_by(video_hash=hash).first() + video_path = movie.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end = str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + log_level_value = "error" + + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-i", + video_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-c:a", + "aac", + "-b:a", + "196k", + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{hash}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_other//-.ts", methods=["GET"]) +def get_chunk_other_quality(quality, hash, idx=0): + seconds = (idx - 1) * CHUNK_LENGTH + movie = OthersVideos.query.filter_by(video_hash=hash).first() + video_path = movie.slug + + time_start = str(datetime.timedelta(seconds=seconds)) + time_end 
= str(datetime.timedelta(seconds=seconds + CHUNK_LENGTH)) + video_properties = get_video_properties(video_path) + width = video_properties["width"] + height = video_properties["height"] + new_width = int(float(quality)) + new_height = round(float(width) / float(height) * new_width) + if (new_height % 2) != 0: + new_height += 1 + + bitrate = { + "1080": "192k", + "720": "192k", + "480": "128k", + "360": "128k", + "240": "96k", + "144": "64k", + } + + log_level_value = "error" + command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + log_level_value, + "-ss", + time_start, + "-to", + time_end, + "-i", + video_path, + "-output_ts_offset", + time_start, + "-c:v", + "libx264", + "-vf", + f"scale={new_height}:{new_width}", + "-c:a", + "aac", + "-b:a", + bitrate[quality], + "-ac", + "2", + "-f", + "mpegts", + "pipe:1", + ] + + pipe = subprocess.Popen(command, stdout=subprocess.PIPE) + + response = make_response(pipe.stdout.read()) + response.headers.set("Content-Type", "video/MP2T") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{hash}-{idx}.ts" + ) + + return response + + +@app.route("/chunk_caption//.vtt", methods=["GET"]) +def chunk_caption(movie_id, index): + movie = Movies.query.filter_by(id=movie_id).first() + video_path = movie.slug + extract_captions_command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + "error", + "-i", + video_path, + "-map", + f"0:{index}", + "-f", + "webvtt", + "pipe:1", + ] + extract_captions = subprocess.run(extract_captions_command, stdout=subprocess.PIPE) + + extract_captions_response = make_response(extract_captions.stdout) + extract_captions_response.headers.set("Content-Type", "text/VTT") + extract_captions_response.headers.set( + "Content-Disposition", "attachment", filename=f"{index}/{movie_id}.vtt" + ) + + return extract_captions_response + + +@app.route("/captionMovie/_.m3u8", methods=["GET"]) +def caption_movie_by_id_to_m3_u8(movie_id, id): + movie = Movies.query.filter_by(id=movie_id).first() + duration = movie.duration + duration = sum(x * int(t) for x, t in zip([3600, 60, 1], duration.split(":"))) + text = f""" +#EXTM3U +#EXT-X-TARGETDURATION:887 +#EXT-X-VERSION:3 +#EXT-X-MEDIA-SEQUENCE:1 +#EXT-X-PLAYLIST-TYPE:VOD +#EXTINF:{float(duration)+1}, +/chunk_caption/{id}/{movie_id}.vtt +#EXT-X-ENDLIST + """ + response = make_response(text) + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set("Accept-Encoding", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}_{id}.m3u8" + ) + + return response + + +@app.route("/chunk_caption_serie///.vtt", methods=["GET"]) +def chunk_caption_serie(language, index, episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + video_path = episode.slug + + extract_captions_command = [ + "ffmpeg", + "-hide_banner", + "-loglevel", + "error", + "-i", + video_path, + "-map", + f"0:{index}", + "-f", + "webvtt", + "pipe:1", + ] + + extract_captions = subprocess.run(extract_captions_command, stdout=subprocess.PIPE) + + extract_captions_response = make_response(extract_captions.stdout) + extract_captions_response.headers.set("Content-Type", "text/VTT") + extract_captions_response.headers.set( + "Content-Disposition", + "attachment", + filename=f"{language}/{index}/{episode_id}.vtt", + ) + + return 
extract_captions_response
+
+
+@app.route("/get_language", methods=["GET"])
+def get_language():
+    language = config["ChocolateSettings"]["language"]
+    return jsonify({"language": language})
+
+
+@app.route("/get_all_movies/<library>", methods=["GET"])
+def get_all_movies(library):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+    generate_log(request, "SERVER")
+    username = all_auth_tokens[token]["user"]
+
+    movies = Movies.query.filter_by(library_name=library).all()
+    user = Users.query.filter_by(name=username).first()
+
+    movies_list = [movie.__dict__ for movie in movies]
+
+    # Merge the movies of every child library before filtering, so merged
+    # libraries go through the same adult-content check.
+    merged_lib = LibrariesMerge.query.filter_by(parent_lib=library).all()
+    merged_lib = [child.child_lib for child in merged_lib]
+
+    for lib in merged_lib:
+        movies = Movies.query.filter_by(library_name=lib).all()
+        movies_list += [movie.__dict__ for movie in movies]
+
+    user_type = user.account_type
+    if user_type in ["Kid", "Teen"]:
+        # Rebuild the list instead of removing while iterating, which would
+        # skip the element that follows every removal.
+        movies_list = [movie for movie in movies_list if movie["adult"] != "True"]
+
+    used_keys = [
+        "real_title",
+        "banner",
+        "cover",
+        "description",
+        "id",
+        "note",
+        "duration",
+    ]
+
+    for movie in movies_list:
+        for key in list(movie.keys()):
+            if key not in used_keys:
+                del movie[key]
+
+    movies_list = natsort.natsorted(movies_list, key=itemgetter("real_title"))
+
+    return jsonify(movies_list)
+
+
+@app.route("/get_all_books/<library>", methods=["GET"])
+def get_all_books(library):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+    generate_log(request, "SUCCESS")
+
+    books = Books.query.filter_by(library_name=library).all()
+    books_list = [book.__dict__ for book in books]
+
+    merged_lib = LibrariesMerge.query.filter_by(parent_lib=library).all()
+    merged_lib = [child.child_lib for child in merged_lib]
+
+    for lib in merged_lib:
+        books = Books.query.filter_by(library_name=lib).all()
+        books_list += [book.__dict__ for book in books]
+
+    for book in books_list:
+        del book["_sa_instance_state"]
+        del book["slug"]
+        del book["book_type"]
+        del book["cover"]
+        del book["library_name"]
+
+    books_list = natsort.natsorted(books_list, key=itemgetter("title"))
+
+    return jsonify(books_list)
+
+
+@app.route("/get_all_playlists/<library>", methods=["GET"])
+def get_all_playlists(library):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+    generate_log(request, "SUCCESS")
+
+    username = all_auth_tokens[token]["user"]
+    user = Users.query.filter_by(name=username).first()
+    user_id = user.id
+
+    playlists = Playlists.query.filter(
+        Playlists.user_id.like(f"%{user_id}%"), Playlists.library_name == library
+    ).all()
+    playlists_list = [playlist.__dict__ for playlist in playlists]
+
+    for playlist in playlists_list:
+        del playlist["_sa_instance_state"]
+
+    playlists_list = natsort.natsorted(playlists_list, key=itemgetter("name"))
+
+    liked_music = MusicLiked.query.filter_by(user_id=user_id, liked="true").all()
+    musics = []
+    for music in liked_music:
+        music_id = music.music_id
+        musics.append(music_id)
+    musics = ",".join(musics)
+
+    if len(musics) > 0:
+        playlists_list.insert(
+            0,
+            {
+                "id": 0,
+                "name": "Likes",
+                "tracks": musics,
+                "cover": "/static/img/likes.webp",
+            },
+        )
+
+    return jsonify(playlists_list)
+
+
+@app.route("/get_all_albums/<library>", methods=["GET"])
+def get_all_albums(library):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
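+    # check_authorization() is expected to abort the request when the
+    # Authorization token is unknown or the user has no access to this
+    # library, so code past this point can assume a valid session.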
generate_log(request, "SUCCESS") + + albums = Albums.query.filter_by(library_name=library).all() + albums_list = [album.__dict__ for album in albums] + + for album in albums_list: + del album["_sa_instance_state"] + + albums_list = natsort.natsorted(albums_list, key=itemgetter(*["name"])) + + return jsonify(albums_list) + + +@app.route("/get_all_artists/", methods=["GET"]) +def get_all_artists(library): + token = request.headers.get("Authorization") + check_authorization(request, token, library) + generate_log(request, "SUCCESS") + + artists = Artists.query.filter_by(library_name=library).all() + artists_list = [artist.__dict__ for artist in artists] + + for artist in artists_list: + del artist["_sa_instance_state"] + + artists_list = natsort.natsorted(artists_list, key=itemgetter(*["name"])) + + return jsonify(artists_list) + + +@app.route("/get_all_tracks/", methods=["GET"]) +def get_all_tracks(library): + token = request.headers.get("Authorization") + check_authorization(request, token, library) + generate_log(request, "SUCCESS") + + tracks = Tracks.query.filter_by(library_name=library).all() + tracks_list = [track.__dict__ for track in tracks] + + for track in tracks_list: + del track["_sa_instance_state"] + try: + album_name = Albums.query.filter_by(id=track["album_id"]).first().name + track["album_name"] = album_name + except Exception: + track["album_name"] = None + + try: + artist_name = Artists.query.filter_by(id=track["artist_id"]).first().name + track["artist_name"] = artist_name + except Exception: + track["artist_name"] = None + + tracks_list = natsort.natsorted(tracks_list, key=itemgetter(*["name"])) + + return jsonify(tracks_list) + + +@app.route("/get_album_tracks/") +def get_album_tracks(album_id): + token = request.headers.get("Authorization") + + try: + user = all_auth_tokens[token]["user"] + generate_log(request, "SUCCESS") + except Exception: + generate_log(request, "ERROR") + return jsonify({"error": "Invalid token"}) + + user = Users.query.filter_by(name=user).first() + user_id = user.id + + tracks = Tracks.query.filter_by(album_id=album_id).all() + tracks_list = [track.__dict__ for track in tracks] + + artist = Artists.query.filter_by(id=tracks_list[0]["artist_id"]).first().name + album = Albums.query.filter_by(id=tracks_list[0]["album_id"]).first().name + + for track in tracks_list: + del track["_sa_instance_state"] + + track["artist_name"] = artist + track["album_name"] = album + + music_like = MusicLiked.query.filter_by( + music_id=track["id"], user_id=user_id + ).first() + if music_like: + track["liked"] = music_like.liked + else: + track["liked"] = False + + return jsonify(tracks_list) + + +@app.route("/get_playlist_tracks/") +def get_playlist_tracks(playlist_id): + token = request.headers.get("Authorization") + + try: + user = all_auth_tokens[token]["user"] + generate_log(request, "SUCCESS") + except Exception: + generate_log(request, "ERROR") + return jsonify({"error": "Invalid token"}) + + user = Users.query.filter_by(name=user).first() + user_id = user.id + tracks_list = [] + if playlist_id != "0": + tracks = Playlists.query.filter( + Playlists.user_id.like(f"%{user_id}%"), Playlists.id == playlist_id + ).first() + tracks = tracks.tracks.split(",") + for track in tracks: + track = Tracks.query.filter_by(id=track).first().__dict__ + + del track["_sa_instance_state"] + + music_like = MusicLiked.query.filter_by( + music_id=track["id"], user_id=user_id + ).first() + if music_like: + track["liked"] = music_like.liked + else: + track["liked"] = False + + if 
"album_id" in track: + album = Albums.query.filter_by(id=track["album_id"]).first() + if album: + track["album_name"] = album.name + + if "artist_id" in track: + artist = Artists.query.filter_by(id=track["artist_id"]).first() + if artist: + track["artist_name"] = artist.name + + tracks_list.append(track) + else: + likes = MusicLiked.query.filter_by(user_id=user_id, liked="true").all() + for like in likes: + track = Tracks.query.filter_by(id=like.music_id).first().__dict__ + + del track["_sa_instance_state"] + + music_like = MusicLiked.query.filter_by( + music_id=track["id"], user_id=user_id + ).first() + track["liked"] = music_like.liked + track["liked_at"] = music_like.liked_at + + if "album_id" in track: + album = Albums.query.filter_by(id=track["album_id"]).first() + track["album_name"] = album.name + + if "artist_id" in track: + artist = Artists.query.filter_by(id=track["artist_id"]).first() + track["artist_name"] = artist.name + + tracks_list.append(track) + + tracks_list = sorted(tracks_list, key=lambda k: k["liked_at"]) + + return jsonify(tracks_list) + + +@app.route("/play_track//", methods=["POST"]) +def play_track(id, user_id): + exists_in_music_played = MusicPlayed.query.filter_by( + music_id=id, user_id=user_id + ).first() + play_count = 0 + if exists_in_music_played: + exists_in_music_played.play_count = int(exists_in_music_played.play_count) + 1 + DB.session.commit() + play_count = exists_in_music_played.play_count + else: + music_played = MusicPlayed(music_id=id, user_id=user_id, play_count=1) + DB.session.add(music_played) + DB.session.commit() + play_count = music_played.play_count + + return jsonify( + { + "status": "success", + "music_id": id, + "user_id": user_id, + "play_count": play_count, + } + ) + + +@app.route("/like_track//", methods=["POST"]) +def like_track(id, user_id): + exist_in_mucis_liked = MusicLiked.query.filter_by( + music_id=id, user_id=user_id + ).first() + liked = False + like_dict = {"true": "false", "false": "true"} + if exist_in_mucis_liked: + exist_in_mucis_liked.liked = like_dict[exist_in_mucis_liked.liked] + liked = like_dict[exist_in_mucis_liked.liked] + exist_in_mucis_liked.liked_at = time() + DB.session.commit() + else: + music_liked = MusicLiked( + music_id=id, user_id=user_id, liked="true", liked_at=time() + ) + DB.session.add(music_liked) + DB.session.commit() + liked = music_liked.liked + + return jsonify( + {"status": "success", "music_id": id, "user_id": user_id, "liked": liked} + ) + + +@app.route("/create_playlist", methods=["POST"]) +def create_playlist(): + body = request.get_json() + + name = body["name"] + user_id = body["user_id"] + track_id = body["track_id"] + library = body["library"] + + exists = Playlists.query.filter_by( + name=name, user_id=user_id, library_name=library + ).first() + if exists: + return jsonify({"status": "error", "error": "Playlist already exists"}) + track = Tracks.query.filter_by(id=track_id).first() + duration = 0 + cover = track.cover + cover = generate_playlist_cover(track_id) + if not cover: + cover = "ahaha" + playlist = Playlists( + name=name, + user_id=user_id, + tracks=f"{track_id}", + library_name=library, + duration=duration, + cover=cover, + ) + DB.session.add(playlist) + DB.session.commit() + + return jsonify({"status": "success", "playlist_id": playlist.id}) + + +def generate_playlist_cover(id): + if isinstance(id, str) or isinstance(id, int): + id = int(id) + track = Tracks.query.filter_by(id=id).first() + cover = track.cover + return cover + elif isinstance(id, list): + tracks = [] + 
id_to_append = 0 + for i in range(4): + try: + tracks.append(id[i]) + except Exception: + tracks.append(id[id_to_append]) + id_to_append += 1 + + covers = [] + for track in tracks: + track = Tracks.query.filter_by(id=track).first() + + covers.append(track.cover) + + im1 = Image.open(covers[0]) + im2 = Image.open(covers[1]) + im3 = Image.open(covers[2]) + im4 = Image.open(covers[3]) + + im1 = im1.resize((200, 200)) + im2 = im2.resize((200, 200)) + im3 = im3.resize((200, 200)) + im4 = im4.resize((200, 200)) + + im1 = im1.crop((0, 0, 100, 100)) + im2 = im2.crop((100, 0, 200, 100)) + im3 = im3.crop((0, 100, 100, 200)) + im4 = im4.crop((100, 100, 200, 200)) + + im = Image.new("RGB", (200, 200)) + im.paste(im1, (0, 0)) + im.paste(im2, (100, 0)) + im.paste(im3, (0, 100)) + im.paste(im4, (100, 100)) + + cover = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp" + exist = os.path.exists(cover) + while exist: + cover = f"{IMAGES_PATH}/Playlist_{uuid4()}.webp" + exist = os.path.exists(cover) + im.save(cover, "WEBP") + + im1.close() + im2.close() + im3.close() + im4.close() + + return cover + + +@app.route("/add_track_to_playlist", methods=["POST"]) +def add_track_to_playlist(): + body = request.get_json() + + playlist_id = body["playlist_id"] + track_id = body["track_id"] + + playlist = Playlists.query.filter_by(id=playlist_id).first() + if playlist.tracks == "": + playlist.tracks = track_id + else: + playlist.tracks += f",{track_id}" + cover = generate_playlist_cover(playlist.tracks.split(",")) + playlist.cover = cover + DB.session.commit() + + return jsonify( + {"status": "success", "playlist_id": playlist_id, "track_id": track_id} + ) + + +@app.route("/get_track/") +def get_track(id): + track = Tracks.query.filter_by(id=id).first().slug + + return send_file(track) + + +@app.route("/get_album/") +def get_album(album_id): + generate_log(request, "SUCCESS") + + album = Albums.query.filter_by(id=album_id).first() + album_dict = album.__dict__ + del album_dict["_sa_instance_state"] + + artist = Artists.query.filter_by(id=album_dict["artist_id"]).first().name + album_dict["artist_name"] = artist + + return jsonify(album_dict) + + +@app.route("/get_playlist/") +def get_playlist(playlist_id): + generate_log(request, "SUCCESS") + token = request.headers.get("Authorization") + user = all_auth_tokens[token]["user"] + user = Users.query.filter_by(name=user).first() + user_id = user.id + + if playlist_id != "0": + playlist = Playlists.query.filter_by(id=playlist_id).first() + playlist_dict = playlist.__dict__ + del playlist_dict["_sa_instance_state"] + else: + liked_music = MusicLiked.query.filter_by(user_id=user_id, liked="true").all() + musics = [] + for music in liked_music: + music_id = music.music_id + musics.append(music_id) + musics = ",".join(musics) + + playlist_dict = { + "id": 0, + "name": "Likes", + "tracks": musics, + "cover": "/static/img/likes.webp", + } + + return jsonify(playlist_dict) + + +@app.route("/get_artist/") +def get_artist(artist_id): + generate_log(request, "SUCCESS") + + artist = Artists.query.filter_by(id=artist_id).first() + artist_dict = artist.__dict__ + del artist_dict["_sa_instance_state"] + + return jsonify(artist_dict) + + +@app.route("/get_artist_albums/") +def get_artist_albums(artist_id): + albums = Albums.query.filter_by(artist_id=artist_id).all() + artist = Artists.query.filter_by(id=artist_id).first() + library = artist.library_name + token = request.headers.get("Authorization") + check_authorization(request, token, library) + generate_log(request, "SUCCESS") + + albums_list = 
[album.__dict__ for album in albums]
+
+    for album in albums_list:
+        del album["_sa_instance_state"]
+
+    return jsonify(albums_list)
+
+
+@app.route("/get_artist_tracks/<artist_id>")
+def get_artist_tracks(artist_id):
+    generate_log(request, "SUCCESS")
+
+    tracks = Tracks.query.filter_by(artist_id=artist_id).all()
+    tracks_list = [track.__dict__ for track in tracks]
+
+    for track in tracks_list:
+        del track["_sa_instance_state"]
+        try:
+            album_name = Albums.query.filter_by(id=track["album_id"]).first().name
+            track["album_name"] = album_name
+        except Exception:
+            pass
+
+        try:
+            artist_name = Artists.query.filter_by(id=track["artist_id"]).first().name
+            track["artist_name"] = artist_name
+        except Exception:
+            pass
+
+    return jsonify(tracks_list)
+
+
+@app.route("/get_all_series/<library>", methods=["GET"])
+def get_all_series(library):
+    token = request.headers.get("Authorization")
+    if token not in all_auth_tokens:
+        abort(401)
+
+    generate_log(request, "SUCCESS")
+
+    username = all_auth_tokens[token]["user"]
+
+    series = Series.query.filter_by(library_name=library).all()
+    the_lib = Libraries.query.filter_by(lib_name=library).first()
+    user = Users.query.filter_by(name=username).first()
+    user_id = user.id
+    user_in_the_lib = user_in_lib(user_id, the_lib)
+
+    if not user_in_the_lib:
+        abort(401)
+
+    if series is None or user is None:
+        abort(404)
+
+    series_list = [serie.__dict__ for serie in series]
+
+    # Merge child libraries before filtering so their content is subject to
+    # the same adult-content check.
+    merged_lib = LibrariesMerge.query.filter_by(parent_lib=library).all()
+    merged_lib = [child.child_lib for child in merged_lib]
+
+    for lib in merged_lib:
+        series = Series.query.filter_by(library_name=lib).all()
+        series_list += [serie.__dict__ for serie in series]
+
+    user_type = user.account_type
+
+    if user_type in ["Kid", "Teen"]:
+        # Rebuild the list instead of removing while iterating.
+        series_list = [serie for serie in series_list if serie["adult"] != "True"]
+
+    for serie in series_list:
+        del serie["_sa_instance_state"]
+
+    for serie in series_list:
+        serie["seasons"] = get_seasons(serie["id"])
+
+    series_list = natsort.natsorted(series_list, key=itemgetter("original_name"))
+
+    return jsonify(series_list)
+
+
+def get_seasons(id):
+    seasons = Seasons.query.filter_by(serie=id).all()
+    seasons_list = [season.__dict__ for season in seasons]
+    for season in seasons_list:
+        del season["_sa_instance_state"]
+
+    return seasons_list
+
+
+def get_similar_movies(movie_id):
+    global searched_films
+    similar_movies_possessed = []
+    movie = Movie()
+    similar_movies = movie.recommendations(movie_id)
+    for movie_info in similar_movies:
+        movie_name = movie_info.title
+        for movie in searched_films:
+            if movie_name == movie:
+                similar_movies_possessed.append(movie)
+                break
+    return similar_movies_possessed
+
+
+@app.route("/get_movie_data/<movie_id>", methods=["GET"])
+def get_movie_data(movie_id):
+    exists = Movies.query.filter_by(id=movie_id).first() is not None
+    if exists:
+        movie = Movies.query.filter_by(id=movie_id).first().__dict__
+        del movie["_sa_instance_state"]
+        movie["similarMovies"] = get_similar_movies(movie_id)
+        return jsonify(movie)
+    else:
+        abort(404)
+
+
+@app.route("/get_other_data/<video_hash>", methods=["GET"])
+def get_other_data(video_hash):
+    exists = OthersVideos.query.filter_by(video_hash=video_hash).first() is not None
+    if exists:
+        other = OthersVideos.query.filter_by(video_hash=video_hash).first().__dict__
+        del other["_sa_instance_state"]
+        return jsonify(other)
+    else:
+        abort(404)
+
+
+@app.route("/get_serie_data/<serie_id>", methods=["GET"])
+def get_series_data(serie_id):
+    exists = Series.query.filter_by(id=serie_id).first() is not None
+    if exists:
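+        # Serialize the row by copying __dict__ and dropping SQLAlchemy's
+        # "_sa_instance_state" bookkeeping key, then attach the seasons and
+        # the latest episode the user watched.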
+ serie = Series.query.filter_by(id=serie_id).first().__dict__ + serie["seasons"] = get_serie_seasons(serie["id"]) + + latest_episode_watched_db = LatestEpisodeWatched.query.filter_by( + serie_id=serie_id + ).first() + if latest_episode_watched_db is not None: + serie["latest_id"] = latest_episode_watched_db.episode_id + else: + serie["latest_id"] = None + + del serie["_sa_instance_state"] + return jsonify(serie) + else: + abort(404) + + +def get_serie_seasons(id): + seasons = Seasons.query.filter_by(serie=id).all() + seasons_dict = {} + for season in seasons: + seasons_dict[season.season_number] = dict(season.__dict__) + del seasons_dict[season.season_number]["_sa_instance_state"] + return seasons_dict + + +def transform(obj): + if isinstance(obj, AsObj): + return str(obj) + return obj.replace('"', '\\"') + + +@app.route("/edit_movie//", methods=["GET", "POST"]) +def edit_movie(id, library): + if request.method == "GET": + the_movie = Movies.query.filter_by(id=id, library_name=library).first() + the_movie = the_movie.__dict__ + del the_movie["_sa_instance_state"] + movie_name = guessit(the_movie["title"])["title"] + file_title = the_movie["slug"] + tmdb = TMDb() + tmdb.language = config["ChocolateSettings"]["language"].lower() + movie = Movie() + movie_info = Search().movies(movie_name) + movie_info = sorted(movie_info, key=lambda k: k["popularity"], reverse=True) + + real_movies = [] + for the_movie in movie_info: + accepted_types = [str, int, list, dict, float, bool] + the_movie = the_movie.__dict__ + for key in the_movie: + if type(the_movie[key]) not in accepted_types: + the_movie[key] = str(the_movie[key]) + real_movies.append(the_movie) + + movies = {"movies": real_movies, "file_title": file_title} + + return jsonify(movies) + + new_movie_id = request.get_json()["new_id"] + + if str(new_movie_id) == str(id): + return jsonify( + {"status": "error", "error": "The new id is the same as the old one"} + ) + the_movie = Movies.query.filter_by(id=id, library_name=library).first() + + movie = Movie() + movie_info = movie.details(new_movie_id) + the_movie.id = new_movie_id + the_movie.real_title = movie_info.title + the_movie.description = movie_info.overview + the_movie.note = movie_info.vote_average + date = movie_info.release_date + + try: + date = datetime.datetime.strptime(date, "%Y-%m-%d").strftime("%d/%m/%Y") + except ValueError: + date = "Unknown" + except UnboundLocalError: + date = "Unknown" + + the_movie.date = date + + bande_annonce = movie_info.videos.results + + bande_annonce_url = "" + if len(bande_annonce) > 0: + for video in bande_annonce: + bande_annonce_type = video.type + bande_annonce_host = video.site + bande_annonce_key = video.key + if bande_annonce_type == "Trailer": + try: + bande_annonce_url = ( + websites_trailers[bande_annonce_host] + bande_annonce_key + ) + break + except KeyError as e: + bande_annonce_url = "Unknown" + print(e) + + the_movie.bande_annonce_url = bande_annonce_url + the_movie.adult = str(movie_info.adult) + + alternatives_names = [] + actual_title = movie_info.title + characters = [" ", "-", "_", ":", ".", ",", "!", "'", "`", '"'] + empty = "" + for character in characters: + for character2 in characters: + if character != character2: + string_test = actual_title.replace(character, character2) + alternatives_names.append(string_test) + string_test = actual_title.replace(character2, character) + alternatives_names.append(string_test) + string_test = actual_title.replace(character, empty) + alternatives_names.append(string_test) + string_test = 
actual_title.replace(character2, empty) + alternatives_names.append(string_test) + + official_alternative_names = movie.alternative_titles(movie_id=the_movie.id).titles + if official_alternative_names is not None: + for official_alternative_name in official_alternative_names: + alternatives_names.append(official_alternative_name.title) + + alternatives_names = list(dict.fromkeys(alternatives_names)) + + alternatives_names = ",".join(alternatives_names) + + the_movie.alternatives_names = alternatives_names + + movie_genre = [] + genre = movie_info.genres + for genre_info in genre: + movie_genre.append(genre_info.name) + movie_genre = ",".join(movie_genre) + + the_movie.genre = movie_genre + casts = movie_info.casts.__dict__["cast"] + + the_cast = [] + for cast in casts: + actor_id = cast.id + actor_image = ( + f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{cast.profile_path}" + ) + if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(requests.get(actor_image).content) + try: + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + except Exception: + os.rename( + f"{IMAGES_PATH}/Actor_{actor_id}.png", + f"{IMAGES_PATH}/Actor_{actor_id}.webp", + ) + + actor_image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" + if actor_id not in the_cast: + the_cast.append(actor_id) + else: + break + person = Person() + p = person.details(actor_id) + exists = Actors.query.filter_by(actor_id=actor_id).first() is not None + if not exists: + actor = Actors( + name=cast.name, + actor_image=actor_image, + actor_description=p.biography, + actor_birth_date=p.birthday, + actor_birth_place=p.place_of_birth, + actor_programs=f"{the_movie.id}", + actor_id=actor_id, + ) + DB.session.add(actor) + DB.session.commit() + elif exists and str(the_movie.id) not in str( + Actors.query.filter_by(actor_id=cast.id).first().actor_programs + ).split(" "): + actor = Actors.query.filter_by(actor_id=cast.id).first() + actor.actor_programs = f"{actor.actor_programs} {the_movie.id}" + DB.session.commit() + + the_cast = the_cast[:5] + the_movie.cast = ",".join([str(x) for x in the_cast]) + + movie_cover_path = f"https://image.tmdb.org/t/p/original{movie_info.poster_path}" + banner = f"https://image.tmdb.org/t/p/original{movie_info.backdrop_path}" + + try: + os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.webp") + except FileNotFoundError: + pass + try: + os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") + except FileNotFoundError: + pass + with open(f"{IMAGES_PATH}/{new_movie_id}_Cover.png", "wb") as f: + f.write(requests.get(movie_cover_path).content) + try: + img = Image.open(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") + img.save(f"{IMAGES_PATH}/{new_movie_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{new_movie_id}_Cover.png") + movie_cover_path = f"{IMAGES_PATH}/{new_movie_id}_Cover.webp" + img.close() + except Exception: + os.rename( + f"{IMAGES_PATH}/{new_movie_id}_Cover.png", + f"{IMAGES_PATH}/{new_movie_id}_Cover.webp", + ) + movie_cover_path = "/static/img/broken.webp" + try: + os.remove(f"{IMAGES_PATH}/{new_movie_id}_Banner.webp") + except FileNotFoundError: + pass + with open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png", "wb") as f: + f.write(requests.get(banner).content) + if not movie_info.backdrop_path: + banner = f"https://image.tmdb.org/t/p/original{movie_info.backdrop_path}" + if banner != 
"https://image.tmdb.org/t/p/originalNone": + with open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png", "wb") as f: + f.write(requests.get(banner).content) + else: + banner = "/static/img/broken.webp" + try: + img = Image.open(f"{IMAGES_PATH}/{new_movie_id}_Banner.png") + img.save(f"{IMAGES_PATH}/{new_movie_id}_Banner.webp", "webp") + os.remove(f"{IMAGES_PATH}/{new_movie_id}_Banner.png") + banner = f"{IMAGES_PATH}/{new_movie_id}_Banner.webp" + img.close() + except Exception: + os.rename( + f"{IMAGES_PATH}/{new_movie_id}_Banner.png", + f"{IMAGES_PATH}/{new_movie_id}_Banner.webp", + ) + banner = "/static/img/brokenBanner.webp" + + if str(id) in movie_cover_path: + movie_cover_path = movie_cover_path.replace(str(id), str(new_movie_id)) + if str(id) in banner: + banner = banner.replace(str(id), str(new_movie_id)) + + the_movie.cover = movie_cover_path + the_movie.banner = banner + DB.session.commit() + + return jsonify({"status": "success"}) + + +@app.route("/edit_serie//", methods=["GET", "POST"]) +def edit_serie(id, library): + if request.method == "GET": + serie = Series.query.filter_by(id=id, library_name=library).first().__dict__ + + del serie["_sa_instance_state"] + serie_name = serie["original_name"] + tmdb = TMDb() + tmdb.language = config["ChocolateSettings"]["language"].lower() + serie_info = Search().tv_shows(serie_name) + if serie_info.results == {}: + data = { + "series": [], + "folder_title": serie["original_name"], + } + return jsonify(data, default=transform, indent=4) + + serie_info = sorted(serie_info, key=lambda k: k["popularity"], reverse=True) + + real_series = [] + for the_serie in serie_info: + accepted_types = [str, int, list, dict, float, bool] + the_serie = the_serie.__dict__ + for key in the_serie: + if type(the_serie[key]) not in accepted_types: + the_serie[key] = str(the_serie[key]) + real_series.append(the_serie) + + data = { + "series": real_series, + "folder_title": serie["original_name"], + } + + return jsonify(data, default=transform, indent=4) + + elif request.method == "POST": + serie_id = request.get_json()["new_id"] + the_serie = Series.query.filter_by(id=id, library_name=library).first() + + if the_serie.id == serie_id: + return jsonify({"status": "success"}) + + all_seasons = Seasons.query.filter_by(serie=serie_id).all() + for season in all_seasons: + cover = f"{dir_path}{season.season_cover_path}" + try: + os.remove(cover) + except FileNotFoundError: + pass + episodes = Episodes.query.filter_by(season_id=season.season_number).all() + for episode in episodes: + cover = f"{dir_path}{episode.episode_cover_path}" + os.remove(cover) + DB.session.delete(episode) + DB.session.delete(season) + DB.session.commit() + + tmdb = TMDb() + tmdb.language = config["ChocolateSettings"]["language"].lower() + show = TV() + details = show.details(serie_id) + res = details + + name = details.name + cover = f"https://image.tmdb.org/t/p/original{res.poster_path}" + banner = f"https://image.tmdb.org/t/p/original{res.backdrop_path}" + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.webp"): + with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: + f.write(requests.get(cover).content) + + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") + img.close() + os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") + else: + os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.webp") + with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: + f.write(requests.get(cover).content) + + img = 
Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img.close() + + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.webp"): + with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: + f.write(requests.get(banner).content) + + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") + img.close() + os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") + else: + os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.webp") + with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: + f.write(requests.get(banner).content) + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") + img.close() + os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") + + banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" + cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" + description = res["overview"] + note = res.vote_average + date = res.first_air_date + cast = details.credits.cast + run_time = details.episode_run_time + duration = "" + for i in range(len(run_time)): + if i != len(run_time) - 1: + duration += f"{str(run_time[i])}:" + else: + duration += f"{str(run_time[i])}" + serie_genre = details.genres + bande_annonce = details.videos.results + bande_annonce_url = "" + if len(bande_annonce) > 0: + for video in bande_annonce: + bande_annonce_type = video.type + bande_annonce_host = video.site + bande_annonce_key = video.key + if bande_annonce_type == "Trailer" or len(bande_annonce) == 1: + try: + bande_annonce_url = ( + websites_trailers[bande_annonce_host] + bande_annonce_key + ) + break + except KeyError as e: + bande_annonce_url = "Unknown" + print(e) + genre_list = [] + for genre in serie_genre: + genre_list.append(str(genre.name)) + new_cast = [] + cast = list(cast)[:5] + for actor in cast: + actor_name = actor.name.replace("/", "") + actor_id = actor.id + actor_image = f"https://image.tmdb.org/t/p/original{actor.profile_path}" + if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(requests.get(actor_image).content) + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + img.close() + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + else: + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.webp") + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(requests.get(actor_image).content) + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + img.close() + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + + actor_image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" + actor_character = actor.character + actor.profile_path = str(actor_image) + this_actor = [ + str(actor_name), + str(actor_character), + str(actor_image), + str(actor.id), + ] + new_cast.append(this_actor) + + person = Person() + p = person.details(actor.id) + exists = Actors.query.filter_by(actor_id=actor.id).first() is not None + if not exists: + actor = Actors( + name=actor.name, + actor_id=actor.id, + actor_image=actor_image, + actor_description=p.biography, + actor_birth_date=p.birthday, + actor_birth_place=p.place_of_birth, + actor_programs=f"{serie_id}", + ) + DB.session.add(actor) + DB.session.commit() + else: + actor = Actors.query.filter_by(actor_id=actor.id).first() + 
actor.actor_programs = f"{actor.actor_programs} {serie_id}" + DB.session.commit() + all_series_path = Libraries.query.filter_by(lib_name=library).first().lib_folder + serie_modified_time = os.path.getmtime( + f"{all_series_path}/{the_serie.original_name}" + ) + + new_cast = jsonify(new_cast[:5]) + genre_list = jsonify(genre_list) + is_adult = str(details["adult"]) + the_serie.id = serie_id + the_serie.name = name + the_serie.genre = genre_list + the_serie.duration = duration + the_serie.description = description + the_serie.cast = new_cast + the_serie.bande_annonce_url = bande_annonce_url + the_serie.cover = cover + the_serie.banner = banner + the_serie.note = note + the_serie.date = date + the_serie.serie_modified_time = serie_modified_time + the_serie.adult = is_adult + the_serie.library_name = library + + DB.session.commit() + scans.getSeries(library) + + return jsonify({"status": "success"}) + + +@app.route("/get_season_data/", methods=["GET"]) +def get_season_data(season_id): + season = Seasons.query.filter_by(season_id=season_id).first() + if season is None: + abort(404) + episodes = Episodes.query.filter_by(season_id=season_id).all() + episodes_dict = {} + for episode in episodes: + episodes_dict[episode.episode_number] = dict(episode.__dict__) + del episodes_dict[episode.episode_number]["_sa_instance_state"] + season = season.__dict__ + del season["_sa_instance_state"] + season["episodes"] = episodes_dict + return jsonify(season) + + +def sort_by_episode_number(episode): + return episode["episode_number"] + + +@app.route("/get_episodes/", methods=["GET"]) +def get_episodes(season_id): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + abort(401) + + username = all_auth_tokens[token]["user"] + + user = Users.query.filter_by(name=username).first() + season = Seasons.query.filter_by(season_id=season_id).first() + serie = Series.query.filter_by(id=season.serie).first() + library = serie.library_name + library = Libraries.query.filter_by(lib_name=library).first() + + if user is None: + abort(404) + + if serie is None: + abort(404) + + if season is None: + abort(404) + + user_in_the_lib = user_in_lib(user.id, library) + if not user_in_the_lib: + abort(401) + + if serie is None or user is None: + abort(404) + + episodes = Episodes.query.filter_by(season_id=season_id).all() + episodes_list = [] + + for episode in episodes: + the_episode = dict(episode.__dict__) + del the_episode["_sa_instance_state"] + episodes_list.append(the_episode) + + episodes_list = natsort.natsorted( + episodes_list, key=itemgetter(*["episode_number"]) + ) + + data = { + "episodes": episodes_list, + "library": library.lib_name, + } + + return jsonify(data) + + +@app.route("/get_episode_data/", methods=["GET"]) +def get_episode_data(episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + if episode is None: + abort(404) + + episode = episode.__dict__ + + season = episode["season_id"] + episode_number = episode["episode_number"] + all_episodes = Episodes.query.filter_by(season_id=season).all() + all_episodes_list = [] + for episode_item in all_episodes: + all_episodes_list.append(dict(episode_item.__dict__)) + all_episodes_list = sorted(all_episodes_list, key=lambda k: k["episode_number"]) + episode_index = all_episodes_list.index( + [x for x in all_episodes_list if x["episode_number"] == episode_number][0] + ) + previous_episode, next_episode = None, None + + if episode_index != 0: + previous_episode = all_episodes_list[episode_index - 1]["episode_id"] + if 
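+    # Locate this episode in the sorted list so the previous/next episode
+    # ids can be derived; the first and last episodes keep None neighbours.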
+    episode_index = all_episodes_list.index(
+        [x for x in all_episodes_list if x["episode_number"] == episode_number][0]
+    )
+    previous_episode, next_episode = None, None
+
+    if episode_index != 0:
+        previous_episode = all_episodes_list[episode_index - 1]["episode_id"]
+    if episode_index != len(all_episodes_list) - 1:
+        next_episode = all_episodes_list[episode_index + 1]["episode_id"]
+
+    new_episode_data = episode
+
+    del new_episode_data["_sa_instance_state"]
+    new_episode_data["previous_episode"] = previous_episode
+    new_episode_data["next_episode"] = next_episode
+
+    return jsonify(new_episode_data)
+
+
+@app.route("/book_url/<id>")
+def book_url(id):
+    book = Books.query.filter_by(id=id).first()
+    if book is None:
+        abort(404)
+    book = book.__dict__
+    return send_file(book["slug"], as_attachment=True)
+
+
+@app.route("/book_url/<id>/<page>")
+def book_url_page(id, page):
+    book = Books.query.filter_by(id=id).first()
+    if book is None:
+        abort(404)
+    book = book.__dict__
+    book_type = book["book_type"]
+    book_slug = book["slug"]
+    available = ["PDF", "CBZ", "CBR", "EPUB"]
+    if book_type in available:
+        if book_type == "PDF" or book_type == "EPUB":
+            pdf_doc = fitz.open(book_slug)
+            page = pdf_doc[int(page)]
+            image_stream = io.BytesIO(page.get_pixmap().tobytes("jpg"))
+            image_stream.seek(0)
+            pdf_doc.close()
+            return send_file(image_stream, mimetype="image/jpeg")
+
+        elif book_type == "CBZ":
+            with zipfile.ZipFile(book_slug, "r") as zip:
+                image_file = zip.namelist()[int(page)]
+                if image_file.endswith((".jpg", ".jpeg", ".png")):
+                    with zip.open(image_file) as image:
+                        image_stream = io.BytesIO(image.read())
+                        image_stream.seek(0)
+                        return send_file(image_stream, mimetype="image/jpeg")
+
+        elif book_type == "CBR":
+            with rarfile.RarFile(book_slug, "r") as rar:
+                image_file = rar.infolist()[int(page)]
+                if image_file.filename.endswith((".jpg", ".jpeg", ".png")):
+                    with rar.open(image_file) as image:
+                        image_stream = io.BytesIO(image.read())
+                        image_stream.seek(0)
+                        return send_file(image_stream, mimetype="image/jpeg")
+
+    abort(404, "Book type not supported")
+
+
+@app.route("/book_data/<id>")
+def book_data(id):
+    book = Books.query.filter_by(id=id).first().__dict__
+    del book["_sa_instance_state"]
+    book_type = book["book_type"]
+    book_slug = book["slug"]
+    nb_pages = 0
+    if book_type == "PDF" or book_type == "EPUB":
+        pdfDoc = fitz.open(book_slug)
+        nb_pages = pdfDoc.page_count
+        pdfDoc.close()
+    elif book_type == "CBZ":
+        with zipfile.ZipFile(book_slug, "r") as zip:
+            nb_pages = len(zip.namelist())
+    elif book_type == "CBR":
+        with rarfile.RarFile(book_slug, "r") as rar:
+            nb_pages = len(rar.infolist())
+    book["nb_pages"] = nb_pages
+    return jsonify(book)
+
+
+@app.route("/download_other/<video_hash>")
+def download_other(video_hash):
+    video = OthersVideos.query.filter_by(video_hash=video_hash).first()
+    video = video.__dict__
+    del video["_sa_instance_state"]
+    return send_file(video["slug"], as_attachment=True)
+
+
+@app.route("/get_all_others/<library>")
+def get_all_others(library):
+    token = request.headers.get("Authorization")
+    if token not in all_auth_tokens:
+        abort(401)
+
+    username = all_auth_tokens[token]["user"]
+
+    the_lib = Libraries.query.filter_by(lib_name=library).first()
+
+    if not the_lib:
+        abort(404)
+
+    user = Users.query.filter_by(name=username).first()
+    user_in_the_lib = user_in_lib(user.id, the_lib)
+    if not user_in_the_lib:
+        return jsonify([])
+
+    other = OthersVideos.query.filter_by(library_name=the_lib.lib_name).all()
+    other_list = [video.__dict__ for video in other]
+
+    merged_lib = LibrariesMerge.query.filter_by(parent_lib=library).all()
+    merged_lib = [child.child_lib for child in merged_lib]
+
+    for lib in merged_lib:
+        other = OthersVideos.query.filter_by(library_name=lib).all()
+        other_list += [video.__dict__ for video in other]
+
+    for video in 
other_list: + del video["_sa_instance_state"] + + return jsonify(other_list) + + +@app.route("/get_tv//") +def get_tv(tv_name, id): + if id != "undefined": + tv = Libraries.query.filter_by(lib_name=tv_name).first() + lib_folder = tv.lib_folder + + if is_valid_url(lib_folder): + m3u = requests.get(lib_folder).text + m3u = m3u.split("\n") + else: + with open(lib_folder, "r", encoding="utf-8") as f: + m3u = f.readlines() + m3u.pop(0) + for ligne in m3u: + if not ligne.startswith(("#EXTINF", "http")): + m3u.remove(ligne) + + if int(id) >= len(m3u): + return jsonify({"channel_url": "", "channel_name": ""}) + + line = m3u[int(id)] + next_line = m3u[int(id) + 1] + the_line = line + if the_line.startswith("#EXTINF"): + the_line = next_line + + try: + channel_name = line.split(",")[-1].replace("\n", "") + except IndexError: + channel_name = f"Channel {id}" + + if int(id) - 2 >= 0: + previous_id = int(id) - 2 + else: + previous_id = None + + if int(id) + 2 < len(m3u): + next_id = int(id) + 2 + else: + next_id = None + + return jsonify( + { + "channel_url": the_line, + "channel_name": channel_name, + "previous_id": previous_id, + "next_id": next_id, + } + ) + return jsonify( + {"channel_url": "", "channel_name": "", "error": "Channel not found"} + ) + + +@app.route("/get_channels/") +def get_channels(channels): + token = request.headers.get("Authorization") + check_authorization(request, token, channels) + + channels = Libraries.query.filter_by(lib_name=channels).first() + if not channels: + abort(404, "Library not found") + lib_folder = channels.lib_folder + + try: + with open(lib_folder, "r", encoding="utf-8") as f: + m3u = f.readlines() + except OSError: + lib_folder = lib_folder.replace("\\", "/") + m3u = requests.get(lib_folder).text + m3u = m3u.split("\n") + + m3u.pop(0) + while m3u[0] == "\n": + m3u.pop(0) + + channels = [] + for i in m3u: + if not i.startswith(("#EXTINF", "http")): + m3u.remove(i) + elif i == "\n": + m3u.remove(i) + for i in range(0, len(m3u) - 1, 2): + data = {} + try: + data["name"] = m3u[i].split(",")[-1].replace("\n", "") + work = True + except Exception: + work = False + if work: + data["url"] = m3u[i + 1].replace("\n", "") + data["channelID"] = i + tvg_id_regex = r'tvg-id="(.+?)"' + tvg_id = None + match = re.search(tvg_id_regex, m3u[i]) + if match: + tvg_id = match.group(1) + data["id"] = tvg_id + + tvg_logo_regex = r'tvg-logo="(.+?)"' + match = re.search(tvg_logo_regex, m3u[i]) + if match and match.group(1) != '" group-title=': + tvg_logo = match.group(1) + data["logo"] = tvg_logo + else: + broken_path = "" + data["logo"] = broken_path + + channels.append(data) + + channels = natsort.natsorted(channels, key=itemgetter(*["name"])) + return jsonify(channels) + + +@app.route("/search_tv//") +def search_tv(library, search): + token = request.headers.get("Authorization") + check_authorization(request, token, library) + + library = Libraries.query.filter_by(lib_name=library).first() + if not library: + abort(404, "Library not found") + lib_folder = library.lib_folder + + try: + with open(lib_folder, "r", encoding="utf-8") as f: + m3u = f.readlines() + except OSError: + lib_folder = lib_folder.replace("\\", "/") + m3u = requests.get(lib_folder).text + m3u = m3u.split("\n") + + m3u.pop(0) + while m3u[0] == "\n": + m3u.pop(0) + + channels = [] + for i in m3u: + if not i.startswith(("#EXTINF", "http")): + m3u.remove(i) + elif i == "\n": + m3u.remove(i) + for i in range(0, len(m3u) - 1, 2): + data = {} + try: + data["name"] = m3u[i].split(",")[-1].replace("\n", "") + work = True 
+ except Exception: + work = False + if work: + data["url"] = m3u[i + 1].replace("\n", "") + data["channelID"] = i + tvg_id_regex = r'tvg-id="(.+?)"' + tvg_id = None + match = re.search(tvg_id_regex, m3u[i]) + if match: + tvg_id = match.group(1) + data["id"] = tvg_id + + tvg_logo_regex = r'tvg-logo="(.+?)"' + match = re.search(tvg_logo_regex, m3u[i]) + if match and match.group(1) != '" group-title=': + tvg_logo = match.group(1) + data["logo"] = tvg_logo + else: + broken_path = "" + data["logo"] = broken_path + + channels.append(data) + + channels = natsort.natsorted(channels, key=itemgetter(*["name"])) + + search = search.lower() + search_terms = search.split(" ") + search_results = [] + + for channel in channels: + count = 0 + name = channel["name"].lower() + for term in search_terms: + if term in name: + count += 1 + if count > 0: + data = channel + data["count"] = count + search_results.append(data) + + search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) + + return jsonify(search_results) + + +@app.route("/search_tracks//") +def search_tracks(library, search): + tracks = Tracks.query.filter_by(library_name=library).all() + + search = search.lower() + search_terms = search.split(" ") + search_results = [] + + for track in tracks: + artist = Artists.query.filter_by(id=track.artist_id).first().name.lower() + if track.album_id: + album = Albums.query.filter_by(id=track.album_id).first().name.lower() + else: + album = "" + count = 0 + name = track.name.lower() + for term in search_terms: + if term in name: + count += 1 + if term in artist: + count += 1 + if term in album: + count += 1 + if count > 0: + data = track + data.count = count + data = data.__dict__ + del data["_sa_instance_state"] + search_results.append(data) + + search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) + + return jsonify(search_results) + + +@app.route("/search_albums//") +def search_albums(library, search): + albums = Albums.query.filter_by(library_name=library).all() + + search = search.lower() + search_terms = search.split(" ") + search_results = [] + + for album in albums: + artist = Artists.query.filter_by(id=album.artist_id).first().name.lower() + name = album.name.lower() + count = 0 + for term in search_terms: + if term in name: + count += 1 + if term in artist: + count += 1 + if count > 0: + data = album + data.count = count + data = data.__dict__ + del data["_sa_instance_state"] + search_results.append(data) + + search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) + + return jsonify(search_results) + + +@app.route("/search_artists//") +def search_artists(library, search): + artists = Artists.query.filter_by(library_name=library).all() + + search = search.lower() + search_terms = search.split(" ") + search_results = [] + + for artist in artists: + name = artist.name.lower() + count = 0 + for term in search_terms: + if term in name: + count += 1 + if count > 0: + data = artist + data.count = count + data = data.__dict__ + del data["_sa_instance_state"] + search_results.append(data) + + search_results = sorted(search_results, key=lambda k: k["count"], reverse=True) + + return jsonify(search_results) + + +@app.route("/search_playlists//") +def search_playlists(library, search): + playlists = Playlists.query.filter_by(library_name=library).all() + + search = search.lower() + search_terms = search.split(" ") + search_results = [] + + for playlist in playlists: + tracks = playlist.tracks.split(",") + name = playlist.name.lower() + 
count = 0
+        for term in search_terms:
+            if term in name:
+                count += 1
+            for track in tracks:
+                track = Tracks.query.filter_by(id=track).first().name.lower()
+                if term in track:
+                    count += 1
+        if count > 0:
+            data = playlist
+            data.count = count
+            data = data.__dict__
+            del data["_sa_instance_state"]
+            search_results.append(data)
+
+    search_results = sorted(search_results, key=lambda k: k["count"], reverse=True)
+
+    return jsonify(search_results)
+
+
+def is_valid_url(url):
+    try:
+        response = requests.get(url)
+        return response.status_code == requests.codes.ok
+    except requests.exceptions.RequestException:
+        return False
+
+
+@app.route("/get_all_consoles/<library>")
+def get_all_consoles(library):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+    generate_log(request, "SUCCESS")
+    consoles_data = {
+        "GB": {"name": "Gameboy", "image": "/static/img/Gameboy.png"},
+        "GBA": {"name": "Gameboy Advance", "image": "/static/img/Gameboy Advance.png"},
+        "GBC": {"name": "Gameboy Color", "image": "/static/img/Gameboy Color.png"},
+        "N64": {"name": "Nintendo 64", "image": "/static/img/N64.png"},
+        "NES": {
+            "name": "Nintendo Entertainment System",
+            "image": "/static/img/NES.png",
+        },
+        "NDS": {"name": "Nintendo DS", "image": "/static/img/Nintendo DS.png"},
+        "SNES": {
+            "name": "Super Nintendo Entertainment System",
+            "image": "/static/img/SNES.png",
+        },
+        "Sega Mega Drive": {
+            "name": "Sega Mega Drive",
+            "image": "/static/img/Sega Mega Drive.png",
+        },
+        "Sega Master System": {
+            "name": "Sega Master System",
+            "image": "/static/img/Sega Master System.png",
+        },
+        "Sega Saturn": {"name": "Sega Saturn", "image": "/static/img/Sega Saturn.png"},
+        "PS1": {"name": "PS1", "image": "/static/img/PS1.png"},
+    }
+
+    consoles = Games.query.filter_by(library_name=library).all()
+    consoles_list = [console.__dict__ for console in consoles]
+
+    merged_lib = LibrariesMerge.query.filter_by(parent_lib=library).all()
+    merged_lib = [child.child_lib for child in merged_lib]
+
+    for lib in merged_lib:
+        consoles = Games.query.filter_by(library_name=lib).all()
+        consoles_list += [console.__dict__ for console in consoles]
+
+    consoles_list_unique = []
+
+    for console in consoles_list:
+        data = {
+            "short_name": console["console"],
+            "image": consoles_data[console["console"]]["image"],
+            "name": consoles_data[console["console"]]["name"],
+        }
+        if data not in consoles_list_unique:
+            consoles_list_unique.append(data)
+
+    return jsonify(consoles_list_unique)
+
+
+@app.route("/get_all_games/<lib>/<console_name>")
+def get_all_games(lib, console_name):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, lib)
+    generate_log(request, "SUCCESS")
+
+    games = Games.query.filter_by(console=console_name, library_name=lib).all()
+
+    if not games:
+        return jsonify([])
+
+    games_list = [game.__dict__ for game in games]
+    for game in games_list:
+        del game["_sa_instance_state"]
+    return jsonify(games_list)
+
+
+@app.route("/game_data/<lib>/<game_id>")
+def game_data(lib, game_id):
+    game_id = Games.query.filter_by(id=game_id, library_name=lib).first()
+    if not game_id:
+        abort(404)
+    game_id = game_id.__dict__
+    del game_id["_sa_instance_state"]
+
+    return jsonify(game_id)
+
+
+@app.route("/game_file/<lib>/<id>")
+def game_file(lib, id):
+    if id is not None:
+        game = Games.query.filter_by(id=id, library_name=lib).first()
+        game = game.__dict__
+        slug = game["slug"]
+        return send_file(slug, as_attachment=True)
+
+
+@app.route("/bios/<console>")
+def bios(console):
+    if console is not None:
+        if not 
os.path.exists(f"{dir_path}/static/bios/{console}"): + abort(404) + bios = [ + i + for i in os.listdir(f"{dir_path}/static/bios/{console}") + if i.endswith(".bin") + ] + bios = f"{dir_path}/static/bios/{console}/{bios[0]}" + + if not os.path.exists(bios): + abort(404) + + return send_file(bios, as_attachment=True) + + +@app.route("/search_movies//") +def search_movies(library, search): + token = request.headers.get("Authorization") + check_authorization(request, token, library) + + username = all_auth_tokens[token]["user"] + user_type = Users.query.filter_by(name=username).first() + + search = unidecode(search.replace("%20", " ").lower()) + search_terms = search.split() + + search = search.replace("%20", " ").lower() + search_terms = search.split() + + for term in search_terms: + if len(term) <= 3: + search_terms.remove(term) + + movies = Movies.query.filter_by(library_name=library).all() + results = {} + for movie in movies: + count = 0 + title = movie.title.lower() + real_title = movie.real_title.lower() + slug = movie.slug.lower() + description = movie.description.lower().split(" ") + casts = movie.cast.split(",") + cast_list = [] + for cast in casts: + cast_list.append(cast.name.lower()) + + cast = " ".join(cast_list) + date = str(movie.date).lower() + genre = movie.genre.lower() + alternatives_names = movie.alternatives_names.lower() + value_used = [title, real_title, slug, cast, date, genre, alternatives_names] + value_points = [2, 4, 3, 1, 0.5, 0.5, 1.5] + for term in search_terms: + for value in value_used: + index = value_used.index(value) + if term.lower() in value: + count += value_points[index] + for word in description: + if term == word.lower(): + count += 0.1 + if count > 0: + results[movie] = count + + results = sorted(results.items(), key=lambda x: x[1], reverse=True) + + movies = [i[0].__dict__ for i in results] + for i in movies: + del i["_sa_instance_state"] + + user_type = user_type.account_type + + if user_type in ["Kid", "Teen"]: + for movie in movies: + if movie["adult"] == "True": + movies.remove(movie) + return jsonify(movies) + + +@app.route("/search_series//") +def search_series(library, search): + token = request.headers.get("Authorization") + check_authorization(request, token, library) + + username = all_auth_tokens[token]["user"] + + series = Series.query.filter_by(library_name=library).all() + user = Users.query.filter_by(name=username).first() + library = Libraries.query.filter_by(lib_name=library).first() + + search = unidecode(search.replace("%20", " ").lower()) + search_terms = search.split() + + results = [] + + for serie_dict in series: + count = 0 + name = unidecode(serie_dict.name.lower()) + original_name = unidecode(serie_dict.original_name.lower()) + description = unidecode(serie_dict.description.lower()) + cast = unidecode(serie_dict.cast.lower()) + date = unidecode(str(serie_dict.date).lower()) + genre = unidecode(serie_dict.genre.lower()) + + value_used = [name, original_name, description, cast, date, genre] + + for term in search_terms: + for value in value_used: + if term in value: + count += 1 + for word in description: + if term == word.lower(): + count += 1 + if count > 0: + serie_dict = serie_dict.__dict__ + serie_dict["count"] = count + del serie_dict["_sa_instance_state"] + results.append(serie_dict) + + results = sorted(results, key=lambda x: x["count"], reverse=True) + + user_type = user.account_type + + if user_type in ["Kid", "Teen"]: + for serie_dict in results: + if serie_dict["adult"] == "True": + results.remove(serie_dict) + + 
return jsonify(results)
+
+
+@app.route("/search_books/<library>/<search>")
+def search_books(library, search):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+
+    books = Books.query.filter_by(library_name=library).all()
+    library = Libraries.query.filter_by(lib_name=library).first()
+
+    search = unidecode(search.replace("%20", " ").lower())
+    search_terms = search.split()
+
+    results = []
+
+    for book in books:
+        count = 0
+        title = unidecode(book.title.lower())
+        slug = unidecode(book.slug.lower())
+        book_type = unidecode(book.book_type.lower())
+        cover = unidecode(book.cover.lower())
+
+        value_used = [title, slug, book_type, cover]
+
+        for term in search_terms:
+            for value in value_used:
+                if term in value:
+                    count += 1
+        if count > 0:
+            results.append(book)
+
+    books = [i.__dict__ for i in results]
+    for book in books:
+        del book["_sa_instance_state"]
+
+    books = natsort.natsorted(books, key=itemgetter(*["title"]))
+    return jsonify(books)
+
+
+@app.route("/search_others/<library>/<search>")
+def search_others(library, search):
+    token = request.headers.get("Authorization")
+    check_authorization(request, token, library)
+
+    username = all_auth_tokens[token]["user"]
+
+    search = search.replace("%20", " ").lower()
+    search_terms = search.split()
+
+    others = OthersVideos.query.filter_by(library_name=library).all()
+    results = {}
+    for other in others:
+        count = 0
+        video_hash = other.video_hash.lower()
+        title = other.title.lower()
+        slug = other.slug.lower()
+
+        value_used = [title, slug, video_hash]
+        for term in search_terms:
+            for value in value_used:
+                if term in value:
+                    count += 1
+        if count > 0:
+            results[other] = count
+
+    results = sorted(results.items(), key=lambda x: x[1], reverse=True)
+
+    others = [i[0].__dict__ for i in results]
+    for i in others:
+        del i["_sa_instance_state"]
+
+    user = Users.query.filter_by(name=username).first()
+    user_type = user.account_type
+
+    if user_type in ["Kid", "Teen"]:
+        others = [other for other in others if other["adult"] != "True"]
+    return jsonify(others)
+
+
+@app.route("/set_vues_time_code/", methods=["POST"])
+def set_vues_time_code():
+    time_code = request.get_json()
+    movie_id = time_code["movie_id"]
+    username = time_code["username"]
+    time_code = time_code["time_code"]
+    movie = Movies.query.filter_by(id=movie_id).first()
+    if movie is None:
+        abort(404)
+
+    actual_vues = movie.vues
+    p = re.compile("(?")
+
+
+@app.route("/main_movie/<movie_id>")
+def main_movie(movie_id):
+    movie_id = movie_id.replace(".m3u8", "")
+    movie = Movies.query.filter_by(id=movie_id).first()
+    video_path = movie.slug
+    video_properties = get_video_properties(video_path)
+    height = int(video_properties["height"])
+    width = int(video_properties["width"])
+    m3u8_file = "#EXTM3U\n\n"
+
+    m3u8_file += generate_caption_movie(movie_id)
+    qualities = [144, 240, 360, 480, 720, 1080]
+    file = []
+    for quality in qualities:
+        if quality < height:
+            new_width = int(quality)
+            new_height = int(float(width) / float(height) * new_width)
+            new_height += new_height % 2
+            m3u8_line = f"#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH={new_width*new_height},CODECS=\"avc1.4d4033,mp4a.40.2\",AUDIO=\"audio\",RESOLUTION={new_height}x{new_width}\n/video_movie/{quality}/{movie_id}.m3u8\n"
+            file.append(m3u8_line)
+    last_line = f"#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH={width*height},CODECS=\"avc1.4d4033,mp4a.40.2\",AUDIO=\"audio\",RESOLUTION={width}x{height}\n/video_movie/{movie_id}.m3u8\n\n\n"
+    file.append(last_line)
+    file = "".join(file)
+    m3u8_file += file
+    response = make_response(m3u8_file)
+
+    
response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{movie_id}.m3u8" + ) + return response + + +@app.route("/can_i_play_movie/") +def can_i_play_movie(movie_id): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + return jsonify({"can_I_play": False}) + else: + user = all_auth_tokens[token]["user"] + movie = Movies.query.filter_by(id=movie_id).first() + if movie is None: + abort(404) + + lib = movie.library_name + the_lib = Libraries.query.filter_by(lib_name=lib).first() + + if the_lib is None: + abort(404) + + if the_lib.available_for is not None: + if user not in the_lib.available_for: + return jsonify({"can_I_play": False}) + return jsonify({"can_I_play": True}) + + +@app.route("/can_i_play_episode/") +def can_i_play_episode(episode_id): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + return jsonify({"can_I_play": False}) + else: + user = all_auth_tokens[token]["user"] + + users = Users.query.filter_by(name=user).first() + + episode = Episodes.query.filter_by(episode_id=episode_id).first() + season = Seasons.query.filter_by(season_id=episode.season_id).first() + serie = Series.query.filter_by(id=season.serie).first() + + latest_episode_of_serie_exist = ( + LatestEpisodeWatched.query.filter_by( + serie_id=serie.id, user_id=users.id + ).first() + is not None + ) + + if latest_episode_of_serie_exist: + latest_episode_of_serie = LatestEpisodeWatched.query.filter_by( + serie_id=serie.id, user_id=users.id + ).first() + latest_episode_of_serie.episode_id = episode_id + DB.session.commit() + else: + latest_episode_of_serie = LatestEpisodeWatched( + serie_id=serie.id, user_id=users.id, episode_id=episode_id + ) + DB.session.add(latest_episode_of_serie) + DB.session.commit() + + if episode is None: + abort(404) + + lib = serie.library_name + the_lib = Libraries.query.filter_by(lib_name=lib).first() + + if the_lib is None: + abort(404) + + if the_lib.available_for is not None: + if user not in the_lib.available_for: + return jsonify({"can_I_play": False}) + return jsonify({"can_I_play": True}) + + +@app.route("/can_i_play_other_video/") +def can_i_play_other_video(video_hash): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + return jsonify({"can_I_play": False}) + else: + user = all_auth_tokens[token]["user"] + video = OthersVideos.query.filter_by(video_hash=video_hash).first() + if video is None: + return jsonify({"can_I_play": False}) + + lib = video.library_name + the_lib = Libraries.query.filter_by(lib_name=lib).first() + + if the_lib is None: + return jsonify({"can_I_play": False}) + + if the_lib.available_for is not None: + available_for = the_lib.available_for.split(",") + if user not in available_for: + return jsonify({"can_I_play": False}) + return jsonify({"can_I_play": True}) + + +@app.route("/main_serie/") +def main_serie(episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + episode_path = episode.slug + + video_properties = get_video_properties(episode_path) + height = int(video_properties["height"]) + width = int(video_properties["width"]) + m3u8_file = "#EXTM3U\n\n" + # m3u8_file += generate_caption_serie(episode_id) + file = [] + qualities = [144, 240, 360, 480, 720, 1080] + for quality in qualities: + if quality < 
height: + new_width = int(quality) + new_height = int(float(width) / float(height) * new_width) + if (new_height % 2) != 0: + new_height += 1 + m3u8_line = f"#EXT-X-STREAM-INF:BANDWIDTH={new_width*new_width},RESOLUTION={new_height}x{new_width}\n/video_serie/{quality}/{episode_id}\n" + file.append(m3u8_line) + last_line = f"#EXT-X-STREAM-INF:BANDWIDTH={width*height},RESOLUTION={width}x{height}\n/video_serie/{episode_id}\n" + file.append(last_line) + file = file[::-1] + file = "".join(file) + m3u8_file += file + + response = make_response(m3u8_file) + + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{episode_id}.m3u8" + ) + return response + + +@app.route("/main_other/") +def main_other(other_hash): + movie = OthersVideos.query.filter_by(video_hash=other_hash).first() + video_path = movie.slug + video_properties = get_video_properties(video_path) + height = int(video_properties["height"]) + width = int(video_properties["width"]) + m3u8_file = "#EXTM3U\n\n" + qualities = [144, 240, 360, 480, 720, 1080] + file = [] + for quality in qualities: + if quality < height: + new_width = int(quality) + new_height = int(float(width) / float(height) * new_width) + if (new_height % 2) != 0: + new_height += 1 + m3u8_line = f"#EXT-X-STREAM-INF:BANDWIDTH={new_width*new_width},RESOLUTION={new_height}x{new_width}\n/video_other/{quality}/{other_hash}\n" + file.append(m3u8_line) + last_line = f"#EXT-X-STREAM-INF:BANDWIDTH={width*height},RESOLUTION={width}x{height}\n/video_other/{other_hash}\n" + file.append(last_line) + file = file[::-1] + file = "".join(file) + m3u8_file += file + response = make_response(m3u8_file) + + response.headers.set("Content-Type", "application/x-mpegURL") + response.headers.set("Range", "bytes=0-4095") + response.headers.set("Accept-Encoding", "*") + response.headers.set("Access-Control-Allow-Origin", "*") + response.headers.set( + "Content-Disposition", "attachment", filename=f"{other_hash}.m3u8" + ) + return response + + +def generate_caption_serie(episode_id): + episode = Episodes.query.filter_by(episode_id=episode_id).first() + slug = episode.slug + caption_command = [ + "ffprobe", + "-loglevel", + "error", + "-select_streams", + "s", + "-show_entries", + "stream=index:stream_tags=language", + "-of", + "csv=p=0", + slug, + ] + caption_pipe = subprocess.Popen(caption_command, stdout=subprocess.PIPE) + caption_response = caption_pipe.stdout.read().decode("utf-8") + caption_response = caption_response.split("\n") + + all_captions = [] + + caption_response.pop() + + for line in caption_response: + line = line.rstrip() + language = line.split(",")[1] + new_language = pycountry.languages.get(alpha_2=language) + index = line.split(",")[0] + try: + title_name = line.split(",")[2] + + try: + title_name = title_name.split(" : ")[0] + subtitle_type = title_name.split(" : ")[1] + except Exception: + title_name = title_name + subtitle_type = "Unknown" + + except Exception: + title_name = new_language + subtitle_type = "Unknown" + if subtitle_type.lower() != "pgs": + all_captions.append( + { + "index": index, + "languageCode": language, + "language": new_language, + "url": f"/chunk_caption_serie/{language}/{index}/{episode_id}.vtt", + "name": title_name, + } + ) + return all_captions + + +def generate_caption_movie(movie_id): + movie_path = 
Movies.query.filter_by(id=movie_id).first()
+    slug = movie_path.slug
+
+    caption_command = [
+        "ffprobe",
+        "-loglevel",
+        "error",
+        "-select_streams",
+        "s",
+        "-show_entries",
+        "stream=index,codec_name:stream_tags=language,title,handler_name,codec_name",
+        "-of",
+        "csv=p=0",
+        slug,
+    ]
+
+    caption_pipe = subprocess.Popen(caption_command, stdout=subprocess.PIPE)
+    caption_response = caption_pipe.stdout.read().decode("utf-8")
+    caption_response = caption_response.split("\n")
+    caption_response.pop()
+
+    all_captions = []
+    for line in caption_response:
+        line = line.rstrip()
+        index = line.split(",")[0]
+        type = line.split(",")[1]
+        language = line.split(",")[2]
+        try:
+            title_name = line.split(",")[3]
+        except Exception:
+            title_name = language
+
+        if type != "subrip":
+            continue
+
+        all_captions.append(
+            {
+                "index": index,
+                "languageCode": language,
+                "language": title_name,
+                "url": f"/captionMovie/{movie_id}_{index}.m3u8",
+                "name": title_name,
+            }
+        )
+    string = ""
+
+    for caption in all_captions:
+        string += f'#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="{caption["language"]}",DEFAULT=NO,FORCED=NO,URI="{caption["url"]}",LANGUAGE="{caption["languageCode"]}"\n'
+
+    return string
+
+
+@app.route("/get_actor_data/<actor_id>", methods=["GET", "POST"])
+def get_actor_data(actor_id):
+    if actor_id == "undefined":
+        abort(404)
+    movies_data = []
+    series_data = []
+    actor = Actors.query.filter_by(actor_id=actor_id).first()
+    movies = actor.actor_programs.split(" ")
+    for movie in movies:
+        in_movies = Movies.query.filter_by(id=movie).first() is not None
+        in_series = Series.query.filter_by(id=movie).first() is not None
+        if in_movies:
+            this_movie = Movies.query.filter_by(id=movie).first().__dict__
+            del this_movie["_sa_instance_state"]
+            if this_movie not in movies_data:
+                movies_data.append(this_movie)
+        elif in_series:
+            this_series = Series.query.filter_by(id=movie).first().__dict__
+            del this_series["_sa_instance_state"]
+            if this_series not in series_data:
+                series_data.append(this_series)
+
+    actor_data = {
+        "actor_name": actor.name,
+        "actor_image": f"/actor_image/{actor_id}",
+        "actor_description": actor.actor_description,
+        "actor_birthday": actor.actor_birth_date,
+        "actor_birthplace": actor.actor_birth_place,
+        "actor_movies": movies_data,
+        "actor_series": series_data,
+    }
+    return jsonify(actor_data)
+
+
+@app.route("/get_this_episode_data/<episode_id>", methods=["GET", "POST"])
+def get_this_episode_data(episode_id):
+    episode = Episodes.query.filter_by(episode_id=episode_id).first()
+    episode_data = {
+        "episode_name": episode.episode_name,
+        "intro_start": episode.intro_start,
+        "intro_end": episode.intro_end,
+    }
+    return jsonify(episode_data)
+
+
+@app.route("/is_chocolate", methods=["GET", "POST"])
+def is_chocolate():
+    return jsonify({"is_chocolate": True})
+
+
+@app.route("/download_movie/<movie_id>")
+def download_movie(movie_id):
+    can_download = config["ChocolateSettings"]["allowDownload"].lower() == "true"
+    if not can_download:
+        return jsonify({"error": "download not allowed"})
+    movie = Movies.query.filter_by(id=movie_id).first()
+    movie_path = movie.slug
+    movie_library = movie.library_name
+    library = Libraries.query.filter_by(lib_name=movie_library).first()
+    library_path = library.lib_folder
+    movie_path = f"{library_path}/{movie_path}"
+    return send_file(movie_path, as_attachment=True)
+
+
+@app.route("/download_episode/<episode_id>")
+def download_episode(episode_id):
+    can_download = config["ChocolateSettings"]["allowDownload"].lower() == "true"
+    if not can_download:
+        return jsonify({"error": "download not allowed"})
+    episode = Episodes.query.filter_by(episode_id=episode_id).first()
+    episode_path = episode.slug
+    return send_file(episode_path, as_attachment=True)
+
+
+@app.route("/movie_cover/<id>")
+def movie_cover(id):
+    movie = Movies.query.filter_by(id=id).first()
+    movie_cover = movie.cover
+    return send_file(movie_cover, as_attachment=True)
+
+
+@app.route("/movie_banner/<id>")
+def movie_banner(id):
+    movie = Movies.query.filter_by(id=id).first()
+    movie_banner = movie.banner
+    return send_file(movie_banner, as_attachment=True)
+
+
+@app.route("/serie_cover/<id>")
+def serie_cover(id):
+    serie = Series.query.filter_by(id=id).first()
+    serie_cover = serie.cover
+    return send_file(serie_cover, as_attachment=True)
+
+
+@app.route("/serie_banner/<id>")
+def serie_banner(id):
+    serie = Series.query.filter_by(id=id).first()
+    serie_banner = serie.banner
+    return send_file(serie_banner, as_attachment=True)
+
+
+@app.route("/season_cover/<id>")
+def season_cover(id):
+    season = Seasons.query.filter_by(season_id=id).first()
+    season_cover = season.cover
+    return send_file(season_cover, as_attachment=True)
+
+
+@app.route("/episode_cover/<id>")
+def episode_cover(id):
+    episode = Episodes.query.filter_by(episode_id=id).first()
+    episode_cover = episode.episode_cover_path
+    if "https://" in episode_cover:
+        response = requests.get(episode_cover)
+        img = Image.open(io.BytesIO(response.content))
+        season_id = episode.season_id
+        img.save(f"{IMAGES_PATH}/{season_id}_{id}_Cover.webp", "webp")
+        episode_cover = f"{IMAGES_PATH}/{season_id}_{id}_Cover.webp"
+        episode.episode_cover_path = episode_cover
+        img.close()
+        DB.session.commit()
+
+    return send_file(episode_cover, as_attachment=True)
+
+
+@app.route("/other_cover/<id>")
+def other_cover(id):
+    other = OthersVideos.query.filter_by(video_hash=id).first()
+    other_cover = other.banner
+    return send_file(other_cover, as_attachment=True)
+
+
+@app.route("/book_cover/<id>")
+def book_cover(id):
+    book = Books.query.filter_by(id=id).first()
+    book_cover = book.cover
+    return send_file(book_cover, as_attachment=True)
+
+
+@app.route("/actor_image/<id>")
+def actor_image(id):
+    actor = Actors.query.filter_by(actor_id=id).first()
+    if actor is None:
+        return send_file(
+            f"{dir_path}/static/img/avatars/defaultUserProfilePic.png",
+            as_attachment=True,
+        )
+    actor_image = actor.actor_image
+    if not os.path.exists(actor_image):
+        ext_to_ext = {
+            ".png": ".webp",
+            ".webp": ".png",
+        }
+        name, extension = os.path.splitext(actor_image)
+        new_extension = ext_to_ext[extension]
+        actor_image = f"{name}{new_extension}"
+        if not os.path.exists(actor_image):
+            actor.actor_image = (
+                f"{dir_path}/static/img/avatars/defaultUserProfilePic.png"
+            )
+            DB.session.commit()
+            return send_file(
+                f"{dir_path}/static/img/avatars/defaultUserProfilePic.png",
+                as_attachment=True,
+            )
+        else:
+            actor.actor_image = actor_image
+            DB.session.commit()
+    return send_file(actor_image, as_attachment=True)
+
+
+@app.route("/artist_image/<id>")
+def artist_image(id):
+    artist = Artists.query.filter_by(id=id).first()
+    artist_image = artist.cover
+    return send_file(artist_image, as_attachment=True)
+
+
+@app.route("/album_cover/<id>")
+def album_cover(id):
+    album = Albums.query.filter_by(id=id).first()
+    album_cover = album.cover
+    return send_file(album_cover, as_attachment=True)
+
+
+@app.route("/playlist_cover/<id>")
+def playlist_cover(id):
+    if id != "0":
+        playlist = Playlists.query.filter_by(id=id).first()
+        playlist_cover = playlist.cover
+    else:
+        playlist_cover = f"{dir_path}/static/img/likes.webp"
+    return send_file(playlist_cover, as_attachment=True)
+
+
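All of the cover routes above share one shape: look a row up by its id, then stream the file path stored on it with send_file. A minimal smoke test for that pattern using Flask's built-in test client follows; the import path for the application object is a guess (this module's app is not packaged under a known name here), and id "0" relies on the likes.webp fallback special-cased in playlist_cover.

# Hedged sketch: `from app import app` is a placeholder for wherever this
# module exposes its Flask `app` object; adjust it to the real module path.
from app import app


def check_playlist_cover() -> None:
    with app.test_client() as client:
        # id "0" is special-cased above to serve static/img/likes.webp.
        response = client.get("/playlist_cover/0")
        assert response.status_code == 200
        # send_file(..., as_attachment=True) sets a Content-Disposition header.
        assert "attachment" in response.headers.get("Content-Disposition", "")


if __name__ == "__main__":
    check_playlist_cover()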
+@app.route("/track_cover/") +def track_cover(id): + track = Tracks.query.filter_by(id=id).first() + track_cover = track.cover + return send_file(track_cover, as_attachment=True) + + +@app.route("/user_image/") +def user_image(id): + user = Users.query.filter_by(id=id).first() + user_image = user.profil_picture + + if not user or not os.path.exists(user_image): + return send_file( + f"{dir_path}/static/img/avatars/defaultUserProfilePic.png", + as_attachment=True, + ) + + return send_file(user_image, as_attachment=True) + + +if __name__ == "__main__": + enabled_rpc = config["ChocolateSettings"]["discordrpc"] + if enabled_rpc == "true": + try: + RPC.update( + state="Loading Chocolate...", + details=f"The Universal MediaManager | ({last_commit_hash})", + large_image="loader", + large_text="Chocolate", + buttons=[ + { + "label": "Github", + "url": "https://github.com/ChocolateApp/Chocolate", + } + ], + start=start_time, + ) + except Exception: + pass + + with app.app_context(): + if not ARGUMENTS.no_scans and config["APIKeys"]["TMDB"] != "Empty": + libraries = Libraries.query.all() + libraries = [library.__dict__ for library in libraries] + + libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_name"])) + libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_type"])) + + type_to_call = { + "series": scans.getSeries, + "movies": scans.getMovies, + "consoles": scans.getGames, + "others": scans.getOthersVideos, + "books": scans.getBooks, + "musics": scans.getMusics, + } + + for library in libraries: + if library["lib_type"] in type_to_call: + type_to_call[library["lib_type"]](library["lib_name"]) + + print() + print("\033[?25h", end="") + + enabled_rpc = config["ChocolateSettings"]["discordrpc"] + if enabled_rpc == "true": + try: + RPC.update( + state="Idling", + details=f"The Universal MediaManager | ({last_commit_hash})", + large_image="largeimage", + large_text="Chocolate", + buttons=[ + { + "label": "Github", + "url": "https://github.com/ChocolateApp/Chocolate", + } + ], + start=time(), + ) + except Exception: + pass + + app.run(host="0.0.0.0", port="8888") diff --git a/src/chocolate_app/convert.py b/src/chocolate_app/convert.py index 2a96a7a..c648356 100644 --- a/src/chocolate_app/convert.py +++ b/src/chocolate_app/convert.py @@ -1,48 +1,48 @@ -import os, subprocess - -# Définissez le chemin du dossier contenant les fichiers vidéos à réduire -# par exemple : -#folder_path = "C:\\Videos\\" -folder_path = r"E:\\Séries\\The Mentalist" - -# Itérez sur tous les dossiers dans le dossier -allSeasons = [ f for f in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path,f)) ] -print(folder_path) -print(allSeasons) -for seasons in allSeasons: - allEpisodes = [ f for f in os.listdir(os.path.join(folder_path,seasons)) if os.path.isfile(os.path.join(folder_path,seasons,f)) ] - print(allEpisodes) - for filename in allEpisodes: - # Vérifiez que le fichier est une vidéo en utilisant son extension de fichier - if (filename.endswith(".mp4") or filename.endswith(".mkv") or filename.endswith(".avi")): - # Construisez le chemin complet du fichier vidéo en utilisant le chemin du dossier et le nom de fichier - filepath = f"{folder_path}\\{seasons}\\{filename}" - newFilepath, file_extension = os.path.splitext(filepath) - newFilepath += f"_compressed.{file_extension}" - - # Utilisez ffmpeg pour réduire la taille du fichier vidéo en utilisant un taux de bits constant - command = [ - "ffmpeg", - "-i", - filepath, - "-c", - "copy", - "-c:v", - "h264_nvenc", - "-qp", - "0", - "-c:a", - 
"copy", - "-y", - "-vsync", - "0", - "-crf", - "22", - "-pix_fmt", - "yuv420p", - "-b:v", - "5M", - f"{newFilepath}" - ] - +import os, subprocess + +# Définissez le chemin du dossier contenant les fichiers vidéos à réduire +# par exemple : +#folder_path = "C:\\Videos\\" +folder_path = r"E:\\Séries\\The Mentalist" + +# Itérez sur tous les dossiers dans le dossier +allSeasons = [ f for f in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path,f)) ] +print(folder_path) +print(allSeasons) +for seasons in allSeasons: + allEpisodes = [ f for f in os.listdir(os.path.join(folder_path,seasons)) if os.path.isfile(os.path.join(folder_path,seasons,f)) ] + print(allEpisodes) + for filename in allEpisodes: + # Vérifiez que le fichier est une vidéo en utilisant son extension de fichier + if (filename.endswith(".mp4") or filename.endswith(".mkv") or filename.endswith(".avi")): + # Construisez le chemin complet du fichier vidéo en utilisant le chemin du dossier et le nom de fichier + filepath = f"{folder_path}\\{seasons}\\{filename}" + newFilepath, file_extension = os.path.splitext(filepath) + newFilepath += f"_compressed.{file_extension}" + + # Utilisez ffmpeg pour réduire la taille du fichier vidéo en utilisant un taux de bits constant + command = [ + "ffmpeg", + "-i", + filepath, + "-c", + "copy", + "-c:v", + "h264_nvenc", + "-qp", + "0", + "-c:a", + "copy", + "-y", + "-vsync", + "0", + "-crf", + "22", + "-pix_fmt", + "yuv420p", + "-b:v", + "5M", + f"{newFilepath}" + ] + subprocess.run(command) \ No newline at end of file diff --git a/src/chocolate_app/empty_config.ini b/src/chocolate_app/empty_config.ini index 392c7de..d66f4d1 100644 --- a/src/chocolate_app/empty_config.ini +++ b/src/chocolate_app/empty_config.ini @@ -1,26 +1,26 @@ -[ChocolateSettings] -language = EN -askwhichserie = false -askwhichmovie = false -compressps1games = true -discordrpc = false -allowdownload = false - -[ARRSettings] -radarrfolder = Empty -sonarrfolder = Empty -lidarrfolder = Empty -readarrfolder = Empty -radarrurl = Empty -sonarrurl = Empty -lidarrurl = Empty -readarrurl = Empty - -[APIKeys] -tmdb = Empty -igdbid = Empty -igdbsecret = Empty -radarr = Empty -sonarr = Empty -lidarr = Empty +[ChocolateSettings] +language = EN +askwhichserie = false +askwhichmovie = false +compressps1games = true +discordrpc = false +allowdownload = false + +[ARRSettings] +radarrfolder = Empty +sonarrfolder = Empty +lidarrfolder = Empty +readarrfolder = Empty +radarrurl = Empty +sonarrurl = Empty +lidarrurl = Empty +readarrurl = Empty + +[APIKeys] +tmdb = Empty +igdbid = Empty +igdbsecret = Empty +radarr = Empty +sonarr = Empty +lidarr = Empty readarr = Empty \ No newline at end of file diff --git a/src/chocolate_app/routes/arr.py b/src/chocolate_app/routes/arr.py index 7eda2a0..8a00af9 100644 --- a/src/chocolate_app/routes/arr.py +++ b/src/chocolate_app/routes/arr.py @@ -1,232 +1,232 @@ -from flask import Blueprint, jsonify, request -from pyarr import LidarrAPI, RadarrAPI, ReadarrAPI, SonarrAPI -from tmdbv3api import Find - -from chocolate_app import config - -arr_bp = Blueprint("arr", __name__) - -@arr_bp.route("/lookup", methods=["POST"]) -def lookup(): - json_file = request.get_json() - media_type = json_file["mediaType"] - query = json_file["query"] - - if media_type == "movie": - radarr_api_key = config["APIKeys"]["radarr"] - radarr_url = config["ARRSettings"]["radarrurl"] - radarr = RadarrAPI(radarr_url, radarr_api_key) - search_results = radarr.lookup_movie(query) - return jsonify(search_results) - elif media_type == 
"serie": - sonarr_api_key = config["APIKeys"]["sonarr"] - sonarr_url = config["ARRSettings"]["sonarrurl"] - sonarr = SonarrAPI(sonarr_url, sonarr_api_key) - search_results = sonarr.lookup_series(query) - return jsonify(search_results) - elif media_type == "music": - lidarr_api_key = config["APIKeys"]["lidarr"] - lidarr_url = config["ARRSettings"]["lidarrurl"] - lidarr = LidarrAPI(lidarr_url, lidarr_api_key) - search_results = lidarr.lookup(query) - return jsonify(search_results) - elif media_type == "book": - readarr_api_key = config["APIKeys"]["readarr"] - readarr_url = config["ARRSettings"]["readarrurl"] - readarr = ReadarrAPI(readarr_url, readarr_api_key) - search_results = readarr.lookup_book(term=query) - return jsonify(search_results) - - -@arr_bp.route("/list_qualities/", methods=["GET"]) -def list_qualities(media_type): - if media_type == "movie": - radarr_api_key = config["APIKeys"]["radarr"] - radarr_url = config["ARRSettings"]["radarrurl"] - radarr = RadarrAPI(radarr_url, radarr_api_key) - quality_list = radarr.get_quality_profile() - - real_quality_list = [] - - for quality in quality_list: - real_quality_list.append({"id": quality["id"], "name": quality["name"]}) - - # order the list by name - real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) - - return jsonify(real_quality_list) - elif media_type == "serie": - sonarr_api_key = config["APIKeys"]["sonarr"] - sonarr_url = config["ARRSettings"]["sonarrurl"] - sonarr = SonarrAPI(sonarr_url, sonarr_api_key) - quality_list = sonarr.get_quality_profile() - - real_quality_list = [] - - for quality in quality_list: - real_quality_list.append({"id": quality["id"], "name": quality["name"]}) - - # order the list by name - real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) - - return jsonify(real_quality_list) - - elif media_type == "music": - lidarr_api_key = config["APIKeys"]["lidarr"] - lidarr_url = config["ARRSettings"]["lidarrurl"] - lidarr = LidarrAPI(lidarr_url, lidarr_api_key) - quality_list = lidarr.get_quality_profile() - - real_quality_list = [] - - for quality in quality_list: - real_quality_list.append({"id": quality["id"], "name": quality["name"]}) - - # order the list by name - real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) - - return jsonify(real_quality_list) - - elif media_type == "book": - readarr_api_key = config["APIKeys"]["readarr"] - readarr_url = config["ARRSettings"]["readarrurl"] - readarr = ReadarrAPI(readarr_url, readarr_api_key) - quality_list = readarr.get_quality_profile() - - real_quality_list = [] - - for quality in quality_list: - real_quality_list.append({"id": quality["id"], "name": quality["name"]}) - - # order the list by name - real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) - - return jsonify(real_quality_list) - - return jsonify( - [ - { - "id": 1, - "name": "There's not quality profile, you must create one in the app", - } - ] - ) - - -@arr_bp.route("/list_language_profiles/", methods=["GET"]) -def list_language_profiles(media_type): - if media_type == "serie": - sonarr_api_key = config["APIKeys"]["sonarr"] - sonarr_url = config["ARRSettings"]["sonarrurl"] - sonarr = SonarrAPI(sonarr_url, sonarr_api_key) - languages = sonarr.get_language_profile() - real_languages = [] - saved_ids = [] - for language in languages: - the_languages = language["languages"] - for the_language in the_languages: - if the_language["allowed"]: - if the_language["language"]["id"] not in saved_ids: - 
saved_ids.append(the_language["language"]["id"]) - real_languages.append(the_language["language"]) - return jsonify(real_languages) - return jsonify( - [ - { - "id": 1, - "name": "There's not language profile, you must create one in the app", - } - ] - ) - - -@arr_bp.route("/add_media", methods=["POST"]) -def add_media(): - media_type = request.get_json()["mediaType"] - media_id = request.get_json()["ID"] - quality_profile = request.get_json()["qualityID"] - term = request.get_json()["term"] - - if media_type == "movie": - radarr_folder = config["ARRSettings"]["radarrFolder"] - radarr_api_key = config["APIKeys"]["radarr"] - radarr_url = config["ARRSettings"]["radarrurl"] - radarr = RadarrAPI(radarr_url, radarr_api_key) - # get all quality : print(radarr.get_quality_profile()) - movie = radarr.lookup_movie(term=term)[int(media_id)] - radarr.add_movie( - movie=movie, quality_profile_id=int(quality_profile), root_dir=radarr_folder - ) - elif media_type == "serie": - language_id = request.get_json()["languageId"] - sonarr_folder = config["ARRSettings"]["sonarrFolder"] - sonarr_api_key = config["APIKeys"]["sonarr"] - sonarr_url = config["ARRSettings"]["sonarrurl"] - language_id = request.get_json()["languageId"] - sonarr = SonarrAPI(sonarr_url, sonarr_api_key) - serie = sonarr.lookup_series(term=term)[int(media_id)] - sonarr.add_series( - series=serie, - quality_profile_id=int(quality_profile), - root_dir=sonarr_folder, - language_profile_id=int(language_id), - ) - elif media_type == "music": - file_type = request.get_json()["type"] - lidarr_folder = config["ARRSettings"]["lidarrFolder"] - lidarr_api_key = config["APIKeys"]["lidarr"] - lidarr_url = config["ARRSettings"]["lidarrurl"] - lidarr = LidarrAPI(lidarr_url, lidarr_api_key) - # print(f"mediaID: {mediaID} | quality_profile: {quality_profile} | lidarrFolder: {lidarrFolder}") - if file_type == "album": - album = lidarr.lookup(term=term)[int(media_id)]["album"] - add_album = lidarr.add_album( - album=album, - quality_profile_id=int(quality_profile), - root_dir=lidarr_folder, - ) - print(add_album) - elif file_type == "artist": - artist = lidarr.lookup(term=term)[int(media_id)] - lidarr.add_artist( - artist=artist, - quality_profile_id=int(quality_profile), - root_dir=lidarr_folder, - ) - elif media_type == "book": - readarr_folder = config["ARRSettings"]["readarrFolder"] - readarr_api_key = config["APIKeys"]["readarr"] - readarr_url = config["ARRSettings"]["readarrurl"] - readarr = ReadarrAPI(readarr_url, readarr_api_key) - - readarr.add_book( - db_id=int(media_id), - quality_profile_id=int(quality_profile), - root_dir=readarr_folder, - book_id_type="goodreads", - ) - - return jsonify({"status": "ok"}) - - -@arr_bp.route("/get_tmdb_poster", methods=["POST"]) -def get_imdb_poster(): - json_file = request.get_json() - if "imdbId" in json_file: - imdb_id = json_file["imdbId"] - find = Find() - media = find.find_by_imdb_id(imdb_id) - url = "" - if media: - try: - for movie in media["movie_results"]: - url = f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{movie['poster_path']}" - break - for serie in media["tv_results"]: - url = f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{serie['poster_path']}" - break - except Exception: - url = "/static/img/broken.webp" - return jsonify({"url": url}) - else: - return jsonify({"url": "/static/img/broken.webp"}) +from flask import Blueprint, jsonify, request +from pyarr import LidarrAPI, RadarrAPI, ReadarrAPI, SonarrAPI +from tmdbv3api import Find + +from chocolate_app import config + +arr_bp = 
Blueprint("arr", __name__) + +@arr_bp.route("/lookup", methods=["POST"]) +def lookup(): + json_file = request.get_json() + media_type = json_file["mediaType"] + query = json_file["query"] + + if media_type == "movie": + radarr_api_key = config["APIKeys"]["radarr"] + radarr_url = config["ARRSettings"]["radarrurl"] + radarr = RadarrAPI(radarr_url, radarr_api_key) + search_results = radarr.lookup_movie(query) + return jsonify(search_results) + elif media_type == "serie": + sonarr_api_key = config["APIKeys"]["sonarr"] + sonarr_url = config["ARRSettings"]["sonarrurl"] + sonarr = SonarrAPI(sonarr_url, sonarr_api_key) + search_results = sonarr.lookup_series(query) + return jsonify(search_results) + elif media_type == "music": + lidarr_api_key = config["APIKeys"]["lidarr"] + lidarr_url = config["ARRSettings"]["lidarrurl"] + lidarr = LidarrAPI(lidarr_url, lidarr_api_key) + search_results = lidarr.lookup(query) + return jsonify(search_results) + elif media_type == "book": + readarr_api_key = config["APIKeys"]["readarr"] + readarr_url = config["ARRSettings"]["readarrurl"] + readarr = ReadarrAPI(readarr_url, readarr_api_key) + search_results = readarr.lookup_book(term=query) + return jsonify(search_results) + + +@arr_bp.route("/list_qualities/", methods=["GET"]) +def list_qualities(media_type): + if media_type == "movie": + radarr_api_key = config["APIKeys"]["radarr"] + radarr_url = config["ARRSettings"]["radarrurl"] + radarr = RadarrAPI(radarr_url, radarr_api_key) + quality_list = radarr.get_quality_profile() + + real_quality_list = [] + + for quality in quality_list: + real_quality_list.append({"id": quality["id"], "name": quality["name"]}) + + # order the list by name + real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) + + return jsonify(real_quality_list) + elif media_type == "serie": + sonarr_api_key = config["APIKeys"]["sonarr"] + sonarr_url = config["ARRSettings"]["sonarrurl"] + sonarr = SonarrAPI(sonarr_url, sonarr_api_key) + quality_list = sonarr.get_quality_profile() + + real_quality_list = [] + + for quality in quality_list: + real_quality_list.append({"id": quality["id"], "name": quality["name"]}) + + # order the list by name + real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) + + return jsonify(real_quality_list) + + elif media_type == "music": + lidarr_api_key = config["APIKeys"]["lidarr"] + lidarr_url = config["ARRSettings"]["lidarrurl"] + lidarr = LidarrAPI(lidarr_url, lidarr_api_key) + quality_list = lidarr.get_quality_profile() + + real_quality_list = [] + + for quality in quality_list: + real_quality_list.append({"id": quality["id"], "name": quality["name"]}) + + # order the list by name + real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) + + return jsonify(real_quality_list) + + elif media_type == "book": + readarr_api_key = config["APIKeys"]["readarr"] + readarr_url = config["ARRSettings"]["readarrurl"] + readarr = ReadarrAPI(readarr_url, readarr_api_key) + quality_list = readarr.get_quality_profile() + + real_quality_list = [] + + for quality in quality_list: + real_quality_list.append({"id": quality["id"], "name": quality["name"]}) + + # order the list by name + real_quality_list = sorted(real_quality_list, key=lambda k: k["name"].lower()) + + return jsonify(real_quality_list) + + return jsonify( + [ + { + "id": 1, + "name": "There's not quality profile, you must create one in the app", + } + ] + ) + + +@arr_bp.route("/list_language_profiles/", methods=["GET"]) +def 
list_language_profiles(media_type): + if media_type == "serie": + sonarr_api_key = config["APIKeys"]["sonarr"] + sonarr_url = config["ARRSettings"]["sonarrurl"] + sonarr = SonarrAPI(sonarr_url, sonarr_api_key) + languages = sonarr.get_language_profile() + real_languages = [] + saved_ids = [] + for language in languages: + the_languages = language["languages"] + for the_language in the_languages: + if the_language["allowed"]: + if the_language["language"]["id"] not in saved_ids: + saved_ids.append(the_language["language"]["id"]) + real_languages.append(the_language["language"]) + return jsonify(real_languages) + return jsonify( + [ + { + "id": 1, + "name": "There's not language profile, you must create one in the app", + } + ] + ) + + +@arr_bp.route("/add_media", methods=["POST"]) +def add_media(): + media_type = request.get_json()["mediaType"] + media_id = request.get_json()["ID"] + quality_profile = request.get_json()["qualityID"] + term = request.get_json()["term"] + + if media_type == "movie": + radarr_folder = config["ARRSettings"]["radarrFolder"] + radarr_api_key = config["APIKeys"]["radarr"] + radarr_url = config["ARRSettings"]["radarrurl"] + radarr = RadarrAPI(radarr_url, radarr_api_key) + # get all quality : print(radarr.get_quality_profile()) + movie = radarr.lookup_movie(term=term)[int(media_id)] + radarr.add_movie( + movie=movie, quality_profile_id=int(quality_profile), root_dir=radarr_folder + ) + elif media_type == "serie": + language_id = request.get_json()["languageId"] + sonarr_folder = config["ARRSettings"]["sonarrFolder"] + sonarr_api_key = config["APIKeys"]["sonarr"] + sonarr_url = config["ARRSettings"]["sonarrurl"] + language_id = request.get_json()["languageId"] + sonarr = SonarrAPI(sonarr_url, sonarr_api_key) + serie = sonarr.lookup_series(term=term)[int(media_id)] + sonarr.add_series( + series=serie, + quality_profile_id=int(quality_profile), + root_dir=sonarr_folder, + language_profile_id=int(language_id), + ) + elif media_type == "music": + file_type = request.get_json()["type"] + lidarr_folder = config["ARRSettings"]["lidarrFolder"] + lidarr_api_key = config["APIKeys"]["lidarr"] + lidarr_url = config["ARRSettings"]["lidarrurl"] + lidarr = LidarrAPI(lidarr_url, lidarr_api_key) + # print(f"mediaID: {mediaID} | quality_profile: {quality_profile} | lidarrFolder: {lidarrFolder}") + if file_type == "album": + album = lidarr.lookup(term=term)[int(media_id)]["album"] + add_album = lidarr.add_album( + album=album, + quality_profile_id=int(quality_profile), + root_dir=lidarr_folder, + ) + print(add_album) + elif file_type == "artist": + artist = lidarr.lookup(term=term)[int(media_id)] + lidarr.add_artist( + artist=artist, + quality_profile_id=int(quality_profile), + root_dir=lidarr_folder, + ) + elif media_type == "book": + readarr_folder = config["ARRSettings"]["readarrFolder"] + readarr_api_key = config["APIKeys"]["readarr"] + readarr_url = config["ARRSettings"]["readarrurl"] + readarr = ReadarrAPI(readarr_url, readarr_api_key) + + readarr.add_book( + db_id=int(media_id), + quality_profile_id=int(quality_profile), + root_dir=readarr_folder, + book_id_type="goodreads", + ) + + return jsonify({"status": "ok"}) + + +@arr_bp.route("/get_tmdb_poster", methods=["POST"]) +def get_imdb_poster(): + json_file = request.get_json() + if "imdbId" in json_file: + imdb_id = json_file["imdbId"] + find = Find() + media = find.find_by_imdb_id(imdb_id) + url = "" + if media: + try: + for movie in media["movie_results"]: + url = 
f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{movie['poster_path']}" + break + for serie in media["tv_results"]: + url = f"https://www.themoviedb.org/t/p/w600_and_h900_bestv2{serie['poster_path']}" + break + except Exception: + url = "/static/img/broken.webp" + return jsonify({"url": url}) + else: + return jsonify({"url": "/static/img/broken.webp"}) diff --git a/src/chocolate_app/routes/libraries.py b/src/chocolate_app/routes/libraries.py index 96e6205..c889331 100644 --- a/src/chocolate_app/routes/libraries.py +++ b/src/chocolate_app/routes/libraries.py @@ -1,323 +1,327 @@ -import json -import natsort - -from flask import Blueprint, jsonify, request, abort -from operator import itemgetter - -from chocolate_app import DB, all_auth_tokens -from chocolate_app.tables import ( - Libraries, - LibrariesMerge, - Users, - Movies, - Series, - Seasons, - Episodes, - Games, - OthersVideos, -) -import chocolate_app.scans as scans -from ..utils.utils import generate_log - -libraries_bp = Blueprint("libraries", __name__) - - -@libraries_bp.route("/get_all_libraries", methods=["GET"]) -def get_all_libraries(): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - user = all_auth_tokens[token]["user"] - user = Users.query.filter_by(name=user).first() - - libraries = Libraries.query.filter_by().all() - libraries_list = [library.__dict__ for library in libraries] - for library in libraries_list: - del library["_sa_instance_state"] - if user.account_type != "Admin": - for library in libraries_list: - if library["available_for"] is not None: - available_for = str(library["available_for"]).split(",") - if str(user.id) not in available_for: - libraries_list.remove(library) - - libraries = sorted(libraries_list, key=lambda k: k["lib_name"].lower()) - libraries = sorted(libraries_list, key=lambda k: k["lib_type"].lower()) - - for library in libraries: - child_libs = LibrariesMerge.query.filter_by( - parent_lib=library["lib_name"] - ).all() - child_libs = [child.child_lib for child in child_libs] - for child in child_libs: - for lib in libraries: - if lib["lib_name"] == child: - libraries.remove(lib) - - generate_log(request, "SERVER") - - return jsonify(libraries) - - -@libraries_bp.route("/get_all_libraries_created") -def get_all_libraries_created(): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - user = all_auth_tokens[token]["user"] - user = Users.query.filter_by(name=user).first() - - libraries = Libraries.query.filter_by().all() - libraries_list = [library.__dict__ for library in libraries] - for library in libraries_list: - del library["_sa_instance_state"] - # check if lib already have a parent - parent = LibrariesMerge.query.filter_by(child_lib=library["lib_name"]).first() - if parent is not None: - library["merge_parent"] = parent.parent_lib - # if lib is a parent, can't be a child - child = LibrariesMerge.query.filter_by(parent_lib=library["lib_name"]).first() - if child is None: - library_type = library["lib_type"] - # for all lib of the same type, remove the actual lib, and add all the lib to "possible_merge_parent" - for lib in libraries_list: - is_child = LibrariesMerge.query.filter_by( - child_lib=lib["lib_name"] - ).first() - if ( - lib["lib_type"] == library_type - and lib["lib_name"] != library["lib_name"] - and is_child is None - ): - if "possible_merge_parent" not in library: - library["possible_merge_parent"] = [] - data = {"value": lib["lib_name"], "text": lib["lib_name"]} - 
library["possible_merge_parent"].append(data) - if user.account_type != "Admin": - for library in libraries_list: - if library["available_for"] is not None: - available_for = str(library["available_for"]).split(",") - if str(user.id) not in available_for: - libraries_list.remove(library) - - generate_log(request, "SERVER") - - return jsonify(libraries) - - -@libraries_bp.route("/create_library", methods=["POST"]) -def create_lib(): - the_request = request.get_json() - the_request = json.loads(the_request) - lib_name = the_request["lib_name"] - lib_path = the_request["lib_path"] - lib_type = the_request["lib_type"] - lib_users = the_request["lib_users"] - - if lib_users == "": - lib_users = None - - icons = { - "movies": "film", - "series": "videocam", - "consoles": "game-controller", - "tv": "tv", - "others": "desktop", - "books": "book", - "musics": "headset", - } - - function_to_call = { - "movies": scans.getMovies, - "series": scans.getSeries, - "consoles": scans.getGames, - "others": scans.getOthersVideos, - "books": scans.getBooks, - "musics": scans.getMusics, - } - - lib_path = lib_path.replace("\\", "/") - - exists = Libraries.query.filter_by(lib_name=lib_name).first() is not None - if not exists: - new_lib = Libraries( - lib_name=lib_name, - lib_folder=lib_path, - lib_type=lib_type, - lib_image=icons[lib_type], - available_for=lib_users, - ) - DB.session.add(new_lib) - DB.session.commit() - try: - function_to_call[lib_type](lib_name) - except Exception: - pass - - return jsonify({"error": "worked"}) - else: - abort(409) - - -@libraries_bp.route("/edit_library", methods=["POST"]) -def edit_lib(): - token = request.headers.get("Authorization") - if token not in all_auth_tokens: - abort(401) - - the_request = request.get_json() - default_path = the_request["default_path"] - lib_name = the_request["name"] - lib_path = the_request["path"] - lib_type = the_request["type"] - lib_users = the_request["users"] - merge_parent = the_request["merge_parent"] - - merge_libraries(merge_parent, lib_name) - - lib_path = lib_path.replace("\\", "/") - - lib = Libraries.query.filter_by(lib_folder=default_path).first() - if lib is None: - abort(404) - - if lib_path is not None: - lib.lib_folder = lib_path - if lib_type is not None: - lib.lib_type = lib_type - if lib_users is not None: - if len(lib_users.split(",")) == 1: - lib_users = int(lib_users.replace('"', "")) - lib.available_for = lib_users - DB.session.commit() - return jsonify({"error": "worked"}) - - -@libraries_bp.route("/delete_library", methods=["POST"]) -def delete_lib(): - the_request = request.get_json() - - lib_name = the_request["name"] - lib = Libraries.query.filter_by(lib_name=lib_name).first() - - if lib is None: - abort(404) - - DB.session.delete(lib) - - lib_type = lib.lib_type - - if lib_type == "movies": - all_movies = Movies.query.filter_by(library_name=lib_name).all() - for movie in all_movies: - DB.session.delete(movie) - elif lib_type == "series": - all_series = Series.query.filter_by(library_name=lib_name).all() - for serie in all_series: - seasons = Seasons.query.filter_by(serie=serie.id).all() - for season in seasons: - episodes = Episodes.query.filter_by(season_id=season.season_id).all() - for episode in episodes: - DB.session.delete(episode) - DB.session.delete(season) - DB.session.delete(serie) - elif lib_type == "consoles": - all_games = Games.query.filter_by(library_name=lib_name).all() - for game in all_games: - DB.session.delete(game) - elif lib_type == "others": - all_other = 
OthersVideos.query.filter_by(library_name=lib_name).all() - for other in all_other: - DB.session.delete(other) - - DB.session.commit() - return jsonify({"error": "worked"}) - - -@libraries_bp.route("/rescan_all", methods=["POST"]) -def rescan_all(): - libraries = Libraries.query.all() - libraries = [library.__dict__ for library in libraries] - - libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_name"])) - libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_type"])) - - type_to_call = { - "series": scans.getSeries, - "movies": scans.getMovies, - "consoles": scans.getGames, - "others": scans.getOthersVideos, - "books": scans.getBooks, - "musics": scans.getMusics, - } - - for library in libraries: - type_to_call[library["lib_type"]](library["lib_name"]) - return jsonify(True) - - -@libraries_bp.route("/rescan/", methods=["POST"]) -def rescan(library): - exists = Libraries.query.filter_by(lib_name=library).first() is not None - - type_to_call = { - "series": scans.getSeries, - "movies": scans.getMovies, - "consoles": scans.getGames, - "others": scans.getOthersVideos, - "books": scans.getBooks, - "musics": scans.getMusics, - } - - if exists: - library = Libraries.query.filter_by(lib_name=library).first().__dict__ - merges = LibrariesMerge.query.filter_by(parent_lib=library["lib_name"]).all() - for merge in merges: - child = Libraries.query.filter_by(lib_name=merge.child_lib).first() - type_to_call[child.lib_type](child.lib_name) - type_to_call[library["lib_type"]](library["lib_name"]) - return jsonify(True) - return jsonify(False) - - -def merge_libraries(parent, child): - if not child: - return - - if not parent: - merge = LibrariesMerge.query.filter_by(child_lib=child).first() - if merge is not None: - DB.session.delete(merge) - DB.session.commit() - return - - parent = Libraries.query.filter_by(lib_name=parent).first() - if parent is None: - return - - child = Libraries.query.filter_by(lib_name=child).first() - if child is None: - return - - if parent.lib_type != child.lib_type: - return - - exist = LibrariesMerge.query.filter_by( - parent_lib=parent.lib_name, child_lib=child.lib_name - ).first() - # child is already a parent - is_parent = LibrariesMerge.query.filter_by(parent_lib=child.lib_name).first() - - if exist is None and is_parent is None: - fusion = LibrariesMerge(parent_lib=parent.lib_name, child_lib=child.lib_name) - DB.session.add(fusion) - DB.session.commit() - elif is_parent is None: - fusion = LibrariesMerge.query.filter_by( - parent_lib=parent.lib_name, child_lib=child.lib_name - ).first() - DB.session.delete(fusion) - DB.session.commit() - return +import json +import natsort + +from flask import Blueprint, jsonify, request, abort +from operator import itemgetter + +from chocolate_app import DB, all_auth_tokens +from chocolate_app.tables import ( + Libraries, + LibrariesMerge, + Users, + Movies, + Series, + Seasons, + Episodes, + Games, + OthersVideos, +) +import chocolate_app.scans as scans +from ..utils.utils import generate_log + +libraries_bp = Blueprint("libraries", __name__) + + +@libraries_bp.route("/get_all_libraries", methods=["GET"]) +def get_all_libraries(): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + abort(401) + + user = all_auth_tokens[token]["user"] + user = Users.query.filter_by(name=user).first() + + libraries = Libraries.query.filter_by().all() + libraries_list = [library.__dict__ for library in libraries] + for library in libraries_list: + del library["_sa_instance_state"] + if 
user.account_type != "Admin": + # iterate over a copy so removals don't skip entries + for library in list(libraries_list): + if library["available_for"] is not None: + available_for = str(library["available_for"]).split(",") + if str(user.id) not in available_for: + libraries_list.remove(library) + + # stable sorts: order by name first, then by type + libraries = sorted(libraries_list, key=lambda k: k["lib_name"].lower()) + libraries = sorted(libraries, key=lambda k: k["lib_type"].lower()) + + for library in libraries: + child_libs = LibrariesMerge.query.filter_by( + parent_lib=library["lib_name"] + ).all() + child_libs = [child.child_lib for child in child_libs] + for child in child_libs: + for lib in list(libraries): + if lib["lib_name"] == child: + libraries.remove(lib) + + generate_log(request, "SERVER") + + return jsonify(libraries) + + +@libraries_bp.route("/get_all_libraries_created") +def get_all_libraries_created(): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + abort(401) + + user = all_auth_tokens[token]["user"] + user = Users.query.filter_by(name=user).first() + + libraries = Libraries.query.filter_by().all() + libraries_list = [library.__dict__ for library in libraries] + for library in libraries_list: + del library["_sa_instance_state"] + # check if the lib already has a parent + parent = LibrariesMerge.query.filter_by(child_lib=library["lib_name"]).first() + if parent is not None: + library["merge_parent"] = parent.parent_lib + # if the lib is a parent, it can't be a child + child = LibrariesMerge.query.filter_by(parent_lib=library["lib_name"]).first() + if child is None: + library_type = library["lib_type"] + # every other lib of the same type that is not already a child becomes a "possible_merge_parent" + for lib in libraries_list: + is_child = LibrariesMerge.query.filter_by( + child_lib=lib["lib_name"] + ).first() + if ( + lib["lib_type"] == library_type + and lib["lib_name"] != library["lib_name"] + and is_child is None + ): + if "possible_merge_parent" not in library: + library["possible_merge_parent"] = [] + data = {"value": lib["lib_name"], "text": lib["lib_name"]} + library["possible_merge_parent"].append(data) + + if user.account_type != "Admin": + for library in list(libraries_list): + if library["available_for"] is not None: + available_for = str(library["available_for"]).split(",") + if str(user.id) not in available_for: + libraries_list.remove(library) + + generate_log(request, "SERVER") + + return jsonify(libraries_list) + + +@libraries_bp.route("/create_library", methods=["POST"]) +def create_lib(): + the_request = request.get_json() + the_request = json.loads(the_request) + lib_name = the_request["lib_name"] + lib_path = the_request["lib_path"] + lib_type = the_request["lib_type"] + lib_users = the_request["lib_users"] + + if lib_users == "": + lib_users = None + + icons = { + "movies": "film", + "series": "videocam", + "consoles": "game-controller", + "tv": "tv", + "others": "desktop", + "books": "book", + "musics": "headset", + } + + function_to_call = { + "movies": scans.getMovies, + "series": scans.getSeries, + "consoles": scans.getGames, + "others": scans.getOthersVideos, + "books": scans.getBooks, + "musics": scans.getMusics, + } + + lib_path = lib_path.replace("\\", "/") + + exists = Libraries.query.filter_by(lib_name=lib_name).first() is not None + if not exists: + new_lib = Libraries( + lib_name=lib_name, + lib_folder=lib_path, + lib_type=lib_type, + lib_image=icons[lib_type], + available_for=lib_users, + ) + DB.session.add(new_lib) + DB.session.commit() + try: + function_to_call[lib_type](lib_name) + except Exception: + pass + + 
return jsonify({"error": "worked"}) + else: + abort(409) + + +@libraries_bp.route("/edit_library", methods=["POST"]) +def edit_lib(): + token = request.headers.get("Authorization") + if token not in all_auth_tokens: + abort(401) + + the_request = request.get_json() + default_path = the_request["default_path"] + lib_name = the_request["name"] + lib_path = the_request["path"] + lib_type = the_request["type"] + lib_users = the_request["users"] + merge_parent = the_request["merge_parent"] + + merge_libraries(merge_parent, lib_name) + + lib_path = lib_path.replace("\\", "/") + + lib = Libraries.query.filter_by(lib_folder=default_path).first() + if lib is None: + abort(404) + + if lib_path is not None: + lib.lib_folder = lib_path + if lib_type is not None: + lib.lib_type = lib_type + if lib_users is not None: + if len(lib_users.split(",")) == 1: + lib_users = int(lib_users.replace('"', "")) + lib.available_for = lib_users + DB.session.commit() + return jsonify({"error": "worked"}) + + +@libraries_bp.route("/delete_library", methods=["POST"]) +def delete_lib(): + the_request = request.get_json() + + lib_name = the_request["name"] + lib = Libraries.query.filter_by(lib_name=lib_name).first() + + if lib is None: + abort(404) + + DB.session.delete(lib) + + lib_type = lib.lib_type + + if lib_type == "movies": + all_movies = Movies.query.filter_by(library_name=lib_name).all() + for movie in all_movies: + DB.session.delete(movie) + elif lib_type == "series": + all_series = Series.query.filter_by(library_name=lib_name).all() + for serie in all_series: + seasons = Seasons.query.filter_by(serie=serie.id).all() + for season in seasons: + episodes = Episodes.query.filter_by(season_id=season.season_id).all() + for episode in episodes: + DB.session.delete(episode) + DB.session.delete(season) + DB.session.delete(serie) + elif lib_type == "consoles": + all_games = Games.query.filter_by(library_name=lib_name).all() + for game in all_games: + DB.session.delete(game) + elif lib_type == "others": + all_other = OthersVideos.query.filter_by(library_name=lib_name).all() + for other in all_other: + DB.session.delete(other) + + DB.session.commit() + return jsonify({"error": "worked"}) + + +@libraries_bp.route("/rescan_all", methods=["POST"]) +def rescan_all(): + libraries = Libraries.query.all() + libraries = [library.__dict__ for library in libraries] + + libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_name"])) + libraries = natsort.natsorted(libraries, key=itemgetter(*["lib_type"])) + + type_to_call = { + "series": scans.getSeries, + "movies": scans.getMovies, + "consoles": scans.getGames, + "others": scans.getOthersVideos, + "books": scans.getBooks, + "musics": scans.getMusics, + } + + for library in libraries: + if library["lib_type"] in type_to_call: + type_to_call[library["lib_type"]](library["lib_name"]) + return jsonify(True) + + +@libraries_bp.route("/rescan/", methods=["POST"]) +def rescan(library): + exists = Libraries.query.filter_by(lib_name=library).first() is not None + + type_to_call = { + "series": scans.getSeries, + "movies": scans.getMovies, + "consoles": scans.getGames, + "others": scans.getOthersVideos, + "books": scans.getBooks, + "musics": scans.getMusics, + } + + if exists: + library = Libraries.query.filter_by(lib_name=library).first().__dict__ + merges = LibrariesMerge.query.filter_by(parent_lib=library["lib_name"]).all() + for merge in merges: + child = Libraries.query.filter_by(lib_name=merge.child_lib).first() + type_to_call[child.lib_type](child.lib_name) + 
type_to_call[library["lib_type"]](library["lib_name"]) + return jsonify(True) + return jsonify(False) + + +def merge_libraries(parent, child): + if not child: + return + + if not parent: + merge = LibrariesMerge.query.filter_by(child_lib=child).first() + if merge is not None: + DB.session.delete(merge) + DB.session.commit() + return + + parent = Libraries.query.filter_by(lib_name=parent).first() + if parent is None: + return + + child = Libraries.query.filter_by(lib_name=child).first() + if child is None: + return + + if parent.lib_type != child.lib_type: + return + + exist = LibrariesMerge.query.filter_by( + parent_lib=parent.lib_name, child_lib=child.lib_name + ).first() + # child is already a parent + is_parent = LibrariesMerge.query.filter_by(parent_lib=child.lib_name).first() + + if exist is None and is_parent is None: + fusion = LibrariesMerge(parent_lib=parent.lib_name, child_lib=child.lib_name) + DB.session.add(fusion) + DB.session.commit() + elif is_parent is None: + fusion = LibrariesMerge.query.filter_by( + parent_lib=parent.lib_name, child_lib=child.lib_name + ).first() + DB.session.delete(fusion) + DB.session.commit() + return diff --git a/src/chocolate_app/routes/settings.py b/src/chocolate_app/routes/settings.py index 0e12815..120ac7e 100644 --- a/src/chocolate_app/routes/settings.py +++ b/src/chocolate_app/routes/settings.py @@ -1,142 +1,142 @@ -from flask import Blueprint, jsonify, request - -from chocolate_app import config, write_config, tmdb -from chocolate_app.tables import Users, Libraries - -settings_bp = Blueprint("settings", __name__) - - -@settings_bp.route("/get_settings", methods=["GET"]) -def get_settings(): - all_users = Users.query.all() - all_libraries = Libraries.query.all() - users = [] - for user in all_users: - user = user.__dict__ - del user["_sa_instance_state"] - user["password"] = "Ratio" - users.append(user) - - libs = [] - for library in all_libraries: - library = library.__dict__ - del library["_sa_instance_state"] - libs.append(library) - - data = { - "users": users, - "libraries": libs, - } - - all_sections = config.sections() - for section in all_sections: - section_data = config[section] - the_data = {} - for key in section_data: - the_data[key] = section_data[key] - data[section] = the_data - - return jsonify(data) - - -@settings_bp.route("/save_settings", methods=["GET", "POST"]) -def save_settings(): - global client_id, client_secret - body = request.get_json() - tmdb_api_key = body["tmdbKey"] - language = body["language"] - igdb_secret_key = body["igdbSecret"] - igdb_client_id = body["igdbID"] - - radarr_adress = body["radarrAdress"] - radarrfolder = body["radarrFolder"] - radarr_api_key = body["radarrAPI"] - sonarr_adress = body["sonarrAdress"] - sonarrfolder = body["sonarrFolder"] - sonarr_api_key = body["sonarrAPI"] - readarr_adress = body["readarrAdress"] - readarrfolder = body["readarrFolder"] - readarr_api_key = body["readarrAPI"] - lidarr_adress = body["lidarrAdress"] - lidarrfolder = body["lidarrFolder"] - lidarr_api_key = body["lidarrAPI"] - - if radarr_adress != "": - if radarr_adress.startswith("https://"): - radarr_adress = radarr_adress.replace("https://", "http://") - if not radarr_adress.startswith("http://"): - radarr_adress = f"http://{radarr_adress}" - config.set("ARRSettings", "radarrurl", radarr_adress) - if radarrfolder != "": - radarrfolder = radarrfolder.replace("\\", "/") - if not radarrfolder.endswith("/"): - radarrfolder = f"{radarrfolder}/" - config.set("ARRSettings", "radarrfolder", radarrfolder) - if 
radarr_api_key != "": - config.set("APIKeys", "radarr", radarr_api_key) - - if sonarr_adress != "": - if sonarr_adress.startswith("https://"): - sonarr_adress = sonarr_adress.replace("https://", "http://") - if not sonarr_adress.startswith("http://"): - sonarr_adress = f"http://{sonarr_adress}" - config.set("ARRSettings", "sonarrurl", sonarr_adress) - if sonarrfolder != "": - sonarrfolder = sonarrfolder.replace("\\", "/") - if not sonarrfolder.endswith("/"): - sonarrfolder = f"{sonarrfolder}/" - config.set("ARRSettings", "sonarrfolder", sonarrfolder) - if sonarr_api_key != "": - config.set("APIKeys", "sonarr", sonarr_api_key) - - if readarr_adress != "": - if readarr_adress.startswith("https://"): - readarr_adress = readarr_adress.replace("https://", "http://") - if not readarr_adress.startswith("http://"): - readarr_adress = f"http://{readarr_adress}" - config.set("ARRSettings", "readarrurl", readarr_adress) - if readarrfolder != "": - readarrfolder = readarrfolder.replace("\\", "/") - if not readarrfolder.endswith("/"): - readarrfolder = f"{readarrfolder}/" - config.set("ARRSettings", "readarrfolder", readarrfolder) - if readarr_api_key != "": - config.set("ARRSettings", "readarrurl", readarr_adress) - - if lidarr_adress != "": - if lidarr_adress.startswith("https://"): - lidarr_adress = lidarr_adress.replace("https://", "http://") - if not lidarr_adress.startswith("http://"): - lidarr_adress = f"http://{lidarr_adress}" - config.set("ARRSettings", "lidarrurl", lidarr_adress) - if lidarrfolder != "": - lidarrfolder = lidarrfolder.replace("\\", "/") - if not lidarrfolder.endswith("/"): - lidarrfolder = f"{lidarrfolder}/" - config.set("ARRSettings", "lidarrfolder", lidarrfolder) - if lidarr_api_key != "": - config.set("ARRSettings", "lidarrurl", lidarr_adress) - if tmdb_api_key != "": - config.set("APIKeys", "TMDB", tmdb_api_key) - tmdb.api_key = tmdb_api_key - if igdb_client_id != "" and igdb_secret_key != "": - config.set("APIKeys", "igdbid", igdb_client_id) - config.set("APIKeys", "igdbsecret", igdb_secret_key) - client_id = igdb_client_id - client_secret = igdb_secret_key - - if language != "undefined": - config.set("ChocolateSettings", "language", language) - - try: - allow_download = body["allowDownloadsCheckbox"] - if allow_download == "on": - config.set("ChocolateSettings", "allowdownload", "true") - else: - config.set("ChocolateSettings", "allowdownload", "false") - except Exception: - config.set("ChocolateSettings", "allowdownload", "false") - - write_config(config) - - return jsonify({"error": "success"}) +from flask import Blueprint, jsonify, request + +from chocolate_app import config, write_config, tmdb +from chocolate_app.tables import Users, Libraries + +settings_bp = Blueprint("settings", __name__) + + +@settings_bp.route("/get_settings", methods=["GET"]) +def get_settings(): + all_users = Users.query.all() + all_libraries = Libraries.query.all() + users = [] + for user in all_users: + user = user.__dict__ + del user["_sa_instance_state"] + user["password"] = "Ratio" + users.append(user) + + libs = [] + for library in all_libraries: + library = library.__dict__ + del library["_sa_instance_state"] + libs.append(library) + + data = { + "users": users, + "libraries": libs, + } + + all_sections = config.sections() + for section in all_sections: + section_data = config[section] + the_data = {} + for key in section_data: + the_data[key] = section_data[key] + data[section] = the_data + + return jsonify(data) + + +@settings_bp.route("/save_settings", methods=["GET", "POST"]) +def 
save_settings(): + global client_id, client_secret + body = request.get_json() + tmdb_api_key = body["tmdbKey"] + language = body["language"] + igdb_secret_key = body["igdbSecret"] + igdb_client_id = body["igdbID"] + + radarr_adress = body["radarrAdress"] + radarrfolder = body["radarrFolder"] + radarr_api_key = body["radarrAPI"] + sonarr_adress = body["sonarrAdress"] + sonarrfolder = body["sonarrFolder"] + sonarr_api_key = body["sonarrAPI"] + readarr_adress = body["readarrAdress"] + readarrfolder = body["readarrFolder"] + readarr_api_key = body["readarrAPI"] + lidarr_adress = body["lidarrAdress"] + lidarrfolder = body["lidarrFolder"] + lidarr_api_key = body["lidarrAPI"] + + if radarr_adress != "": + if radarr_adress.startswith("https://"): + radarr_adress = radarr_adress.replace("https://", "http://") + if not radarr_adress.startswith("http://"): + radarr_adress = f"http://{radarr_adress}" + config.set("ARRSettings", "radarrurl", radarr_adress) + if radarrfolder != "": + radarrfolder = radarrfolder.replace("\\", "/") + if not radarrfolder.endswith("/"): + radarrfolder = f"{radarrfolder}/" + config.set("ARRSettings", "radarrfolder", radarrfolder) + if radarr_api_key != "": + config.set("APIKeys", "radarr", radarr_api_key) + + if sonarr_adress != "": + if sonarr_adress.startswith("https://"): + sonarr_adress = sonarr_adress.replace("https://", "http://") + if not sonarr_adress.startswith("http://"): + sonarr_adress = f"http://{sonarr_adress}" + config.set("ARRSettings", "sonarrurl", sonarr_adress) + if sonarrfolder != "": + sonarrfolder = sonarrfolder.replace("\\", "/") + if not sonarrfolder.endswith("/"): + sonarrfolder = f"{sonarrfolder}/" + config.set("ARRSettings", "sonarrfolder", sonarrfolder) + if sonarr_api_key != "": + config.set("APIKeys", "sonarr", sonarr_api_key) + + if readarr_adress != "": + if readarr_adress.startswith("https://"): + readarr_adress = readarr_adress.replace("https://", "http://") + if not readarr_adress.startswith("http://"): + readarr_adress = f"http://{readarr_adress}" + config.set("ARRSettings", "readarrurl", readarr_adress) + if readarrfolder != "": + readarrfolder = readarrfolder.replace("\\", "/") + if not readarrfolder.endswith("/"): + readarrfolder = f"{readarrfolder}/" + config.set("ARRSettings", "readarrfolder", readarrfolder) + if readarr_api_key != "": + # store the Readarr API key rather than re-writing the URL + config.set("APIKeys", "readarr", readarr_api_key) + + if lidarr_adress != "": + if lidarr_adress.startswith("https://"): + lidarr_adress = lidarr_adress.replace("https://", "http://") + if not lidarr_adress.startswith("http://"): + lidarr_adress = f"http://{lidarr_adress}" + config.set("ARRSettings", "lidarrurl", lidarr_adress) + if lidarrfolder != "": + lidarrfolder = lidarrfolder.replace("\\", "/") + if not lidarrfolder.endswith("/"): + lidarrfolder = f"{lidarrfolder}/" + config.set("ARRSettings", "lidarrfolder", lidarrfolder) + if lidarr_api_key != "": + # store the Lidarr API key rather than re-writing the URL + config.set("APIKeys", "lidarr", lidarr_api_key) + if tmdb_api_key != "": + config.set("APIKeys", "TMDB", tmdb_api_key) + tmdb.api_key = tmdb_api_key + if igdb_client_id != "" and igdb_secret_key != "": + config.set("APIKeys", "igdbid", igdb_client_id) + config.set("APIKeys", "igdbsecret", igdb_secret_key) + client_id = igdb_client_id + client_secret = igdb_secret_key + + if language != "undefined": + config.set("ChocolateSettings", "language", language) + + try: + allow_download = body["allowDownloadsCheckbox"] + if allow_download == "on": + config.set("ChocolateSettings", "allowdownload", "true") + else: + 
config.set("ChocolateSettings", "allowdownload", "false") + except Exception: + config.set("ChocolateSettings", "allowdownload", "false") + + write_config(config) + + return jsonify({"error": "success"}) diff --git a/src/chocolate_app/routes/users.py b/src/chocolate_app/routes/users.py index ab586c4..075d3bb 100644 --- a/src/chocolate_app/routes/users.py +++ b/src/chocolate_app/routes/users.py @@ -1,257 +1,262 @@ -import os -import time -import base64 -import io - -from PIL import Image -from flask import Blueprint, jsonify, request, abort -from werkzeug.security import generate_password_hash - -from chocolate_app import DB, get_dir_path, all_auth_tokens, IMAGES_PATH -from chocolate_app.tables import Users, InviteCodes -from ..utils.utils import check_authorization, generate_log - - -dir_path = get_dir_path() -users_bp = Blueprint("users", __name__) - - -@users_bp.route("/get_all_users", methods=["GET"]) -def get_all_users(): - all_users = Users.query.filter().all() - all_users_list = [] - for user in all_users: - profil_picture = user.profil_picture - if not os.path.exists(dir_path + profil_picture): - profil_picture = "/static/img/avatars/defaultUserProfilePic.png" - user_dict = { - "name": user.name, - "profil_picture": profil_picture, - "account_type": user.account_type, - "password_empty": True if not user.password else False, - "id": user.id, - } - all_users_list.append(user_dict) - return jsonify(all_users_list) - - -@users_bp.route("/login", methods=["POST"]) -def login(): - from uuid import uuid4 - - auth_token = str(uuid4()) - account_name = request.get_json()["name"] - account_password = request.get_json()["password"] - user = Users.query.filter_by(name=account_name).first() - token = f"Bearer {auth_token}" - actual_time_in_seconds = int(time.time()) - all_auth_tokens[token] = {"user": account_name, "time": actual_time_in_seconds} - if user: - if user.account_type == "Kid": - generate_log(request, "LOGIN") - return jsonify( - {"id": user.id, "name": user.name, "error": "None", "token": auth_token} - ) - elif user.verify_password(account_password): - generate_log(request, "LOGIN") - return jsonify( - {"id": user.id, "name": user.name, "error": "None", "token": auth_token} - ) - else: - generate_log(request, "ERROR") - return jsonify({"error": "Unauthorized"}) - else: - generate_log(request, "ERROR") - return jsonify({"error": "Unauthorized"}) - - -@users_bp.route("/create_account", methods=["POST"]) -def create_account(): - body = request.get_json() - account_name = body["username"] - account_password = body["password"] - account_type_input = body["type"] - - profil_picture = f"{IMAGES_PATH}/avatars/{account_name}.webp" - if "profil_picture" not in body: - profil_picture = "/static/img/avatars/defaultUserProfilePic.png" - else: - file_base64 = body["profil_picture"] - if file_base64.startswith("data:image"): - file_base64 = file_base64.split(",", 1)[1] - - full_path = profil_picture - - image_data = base64.b64decode(file_base64) - - # Lire l'image à partir des bytes - image = Image.open(io.BytesIO(image_data)) - - # Déterminer le format de l'image - image_format = image.format.lower() - - # Convertir l'image en format WebP si nécessaire - if image_format != "webp": - output_buffer = io.BytesIO() - image.save(output_buffer, "WEBP") - output_buffer.seek(0) - image = Image.open(output_buffer) - - # Enregistrer l'image au format WebP - image.save(full_path, "WEBP") - - user_exists = Users.query.filter_by(name=account_name).first() - - if user_exists: - abort(409) - account_type_input 
= account_type_input.lower() - account_type_input = account_type_input.capitalize() - new_user = Users( - name=account_name, - password=account_password, - profil_picture=profil_picture, - account_type=account_type_input, - ) - DB.session.add(new_user) - DB.session.commit() - return jsonify( - { - "id": new_user.id, - "name": new_user.name, - } - ) - - -@users_bp.route("/edit_profil", methods=["POST"]) -def edit_profil(): - authorization = request.headers.get("Authorization") - - if authorization not in all_auth_tokens: - abort(401, "Unauthorized") - - user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() - - body = request.get_json() - - user_name = body["username"] - password = body["password"] - - type = None - if "type" in body: - type = body["type"] - id = body["id"] - - if str(id) != str(user.id) and user.account_type != "Admin": - abort(401, "Unauthorized") - - print(all_auth_tokens) - print(authorization) - username_in_tokens = all_auth_tokens[authorization]["user"] - user = Users.query.filter_by(name=username_in_tokens).first() - try: - f = request.files["image"] - name, extension = os.path.splitext(f.filename) - profil_picture = f"/static/img/{user_name}{extension}" - if extension == "": - profil_picture = "/static/img/avatars/defaultUserProfilePic.png" - except Exception: - profil_picture = "/static/img/avatars/defaultUserProfilePic.png" - - user_to_edit = Users.query.filter_by(id=id).first() - - if user_to_edit.name != user_name: - user_to_edit.name = user_name - - if type and user_to_edit.account_type != type: - user_to_edit.account_type = type - - if user_to_edit.password != generate_password_hash(password) and len(password) > 0: - if password == "": - user_to_edit.password = None - else: - user_to_edit.password = generate_password_hash(password) - if ( - user_to_edit.profil_picture != profil_picture - and "/static/img/avatars/defaultUserProfilePic.png" not in profil_picture - ): - f = request.files["profil_picture"] - f.save(f"{dir_path}{profil_picture}") - user_to_edit.profil_picture = profil_picture - - DB.session.commit() - - return jsonify( - { - "id": user_to_edit.id, - "name": user_to_edit.name, - } - ) - - -@users_bp.route("/delete_account", methods=["POST"]) -def delete_account(): - authorization = request.headers.get("Authorization") - check_authorization(request, authorization) - print(authorization) - body = request.get_json() - id = body["id"] - print(id) - - user = Users.query.filter_by(id=id).first() - DB.session.delete(user) - DB.session.commit() - - return jsonify( - { - "id": user.id, - "name": user.name, - } - ) - - -@users_bp.route("/get_profil/") -def get_profil(id): - user = Users.query.filter_by(id=id).first() - profil_picture = user.profil_picture - if not os.path.exists(profil_picture): - profil_picture = "/static/img/avatars/defaultUserProfilePic.png" - user_dict = { - "name": user.name, - "profil_picture": profil_picture, - "account_type": user.account_type, - } - return jsonify(user_dict) - - -@users_bp.route("/is_admin", methods=["GET"]) -def is_admin(): - authorization = request.headers.get("Authorization") - check_authorization(request, authorization) - user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() - if user.account_type == "Admin": - return jsonify(True) - else: - return jsonify(False) - - -@users_bp.route("/invite_exist/", methods=["GET"]) -def invite_exist(hash): - can = InviteCodes.query.filter_by(code=hash).first() is not None - return jsonify(can) - - 
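The create_account route (re-added below) converts a base64 data-URI avatar into a WebP file with Pillow. A minimal standalone sketch of that conversion, assuming Pillow is installed; the function name, data_uri value, and output path are illustrative placeholders:

    import base64
    import io

    from PIL import Image

    def data_uri_to_webp(data_uri: str, out_path: str) -> None:
        # strip the "data:image/...;base64," prefix when present
        if data_uri.startswith("data:image"):
            data_uri = data_uri.split(",", 1)[1]
        image = Image.open(io.BytesIO(base64.b64decode(data_uri)))
        # Pillow picks the encoder from the explicit "WEBP" format argument
        image.save(out_path, "WEBP")
        image.close()

    # hypothetical usage: data_uri_to_webp(body["profil_picture"], "avatar.webp")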
-@users_bp.route("/create_invite", methods=["POST"]) -def create_invite(): - authorization = request.headers.get("Authorization") - check_authorization(request, authorization) - user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() - - if user.account_type != "Admin": - abort(401, "Unauthorized") - - body = request.get_json() - code = body["code"] - new_invite = InviteCodes(code=code) - DB.session.add(new_invite) - DB.session.commit() - return jsonify({"code": code}) +import os +import time +import base64 +import io + +from PIL import Image +from flask import Blueprint, jsonify, request, abort +from werkzeug.security import generate_password_hash + +from chocolate_app import DB, get_dir_path, all_auth_tokens, IMAGES_PATH +from chocolate_app.tables import Users, InviteCodes +from ..utils.utils import check_authorization, generate_log + + +dir_path = get_dir_path() +users_bp = Blueprint("users", __name__) + + +@users_bp.route("/get_all_users", methods=["GET"]) +def get_all_users(): + all_users = Users.query.filter().all() + all_users_list = [] + for user in all_users: + profil_picture = user.profil_picture + if not os.path.exists(dir_path + profil_picture): + profil_picture = "/static/img/avatars/defaultUserProfilePic.png" + user_dict = { + "name": user.name, + "profil_picture": profil_picture, + "account_type": user.account_type, + "password_empty": True if not user.password else False, + "id": user.id, + } + all_users_list.append(user_dict) + return jsonify(all_users_list) + + +@users_bp.route("/login", methods=["POST"]) +def login(): + from uuid import uuid4 + + auth_token = str(uuid4()) + account_name = request.get_json()["name"] + account_password = request.get_json()["password"] + user = Users.query.filter_by(name=account_name).first() + token = f"Bearer {auth_token}" + actual_time_in_seconds = int(time.time()) + all_auth_tokens[token] = {"user": account_name, "time": actual_time_in_seconds} + if user: + if user.account_type == "Kid": + generate_log(request, "LOGIN") + return jsonify( + {"id": user.id, "name": user.name, "error": "None", "token": auth_token} + ) + elif user.verify_password(account_password): + generate_log(request, "LOGIN") + return jsonify( + {"id": user.id, "name": user.name, "error": "None", "token": auth_token} + ) + else: + generate_log(request, "ERROR") + return jsonify({"error": "Unauthorized"}) + else: + generate_log(request, "ERROR") + return jsonify({"error": "Unauthorized"}) + + +@users_bp.route("/create_account", methods=["POST"]) +def create_account(): + body = request.get_json() + account_name = body["username"] + account_password = body["password"] + account_type_input = body["type"] + + profil_picture = f"{IMAGES_PATH}/avatars/{account_name}.webp" + if "profil_picture" not in body: + profil_picture = "/static/img/avatars/defaultUserProfilePic.png" + else: + file_base64 = body["profil_picture"] + if file_base64.startswith("data:image"): + file_base64 = file_base64.split(",", 1)[1] + + full_path = profil_picture + + image_data = base64.b64decode(file_base64) + + # Lire l'image à partir des bytes + image = Image.open(io.BytesIO(image_data)) + + # Déterminer le format de l'image + image_format = image.format.lower() + + # Convertir l'image en format WebP si nécessaire + if image_format != "webp": + output_buffer = io.BytesIO() + image.save(output_buffer, "WEBP") + output_buffer.seek(0) + image = Image.open(output_buffer) + + # Enregistrer l'image au format WebP + image.save(full_path, "WEBP") + image.close() + + user_exists = 
Users.query.filter_by(name=account_name).first() + nb_users = len(Users.query.filter().all()) + if user_exists: + abort(409) + account_type_input = account_type_input.lower() + account_type_input = account_type_input.capitalize() + + if nb_users == 0: + account_type_input = "Admin" + + new_user = Users( + name=account_name, + password=account_password, + profil_picture=profil_picture, + account_type=account_type_input, + ) + DB.session.add(new_user) + DB.session.commit() + return jsonify( + { + "id": new_user.id, + "name": new_user.name, + } + ) + + +@users_bp.route("/edit_profil", methods=["POST"]) +def edit_profil(): + authorization = request.headers.get("Authorization") + + if authorization not in all_auth_tokens: + abort(401, "Unauthorized") + + user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() + + body = request.get_json() + + user_name = body["name"] + password = body["password"] + + type = None + if "type" in body: + type = body["type"] + id = body["id"] + + if str(id) != str(user.id) and user.account_type != "Admin": + abort(401, "Unauthorized") + + username_in_tokens = all_auth_tokens[authorization]["user"] + user = Users.query.filter_by(name=username_in_tokens).first() + try: + f = request.files["image"] + name, extension = os.path.splitext(f.filename) + profil_picture = f"/static/img/{user_name}{extension}" + if extension == "": + profil_picture = "/static/img/avatars/defaultUserProfilePic.png" + except Exception: + profil_picture = "/static/img/avatars/defaultUserProfilePic.png" + + user_to_edit = Users.query.filter_by(id=id).first() + + if user_to_edit.name != user_name: + user_to_edit.name = user_name + + if type and user_to_edit.account_type != type: + user_to_edit.account_type = type + + # generate_password_hash is salted, so comparing fresh hashes never matches; just check the input + if len(password) > 0: + user_to_edit.password = generate_password_hash(password) + + if password == "": + user_to_edit.password = None + if ( + user_to_edit.profil_picture != profil_picture + and "/static/img/avatars/defaultUserProfilePic.png" not in profil_picture + ): + f = request.files["profil_picture"] + f.save(f"{dir_path}{profil_picture}") + user_to_edit.profil_picture = profil_picture + + DB.session.commit() + + return jsonify( + { + "id": user_to_edit.id, + "name": user_to_edit.name, + } + ) + + +@users_bp.route("/delete_account", methods=["POST"]) +def delete_account(): + authorization = request.headers.get("Authorization") + check_authorization(request, authorization) + body = request.get_json() + id = body["id"] + + user = Users.query.filter_by(id=id).first() + DB.session.delete(user) + DB.session.commit() + + return jsonify( + { + "id": user.id, + "name": user.name, + } + ) + + +@users_bp.route("/get_profil/<id>") +def get_profil(id): + user = Users.query.filter_by(id=id).first() + profil_picture = user.profil_picture + if not os.path.exists(profil_picture): + profil_picture = "/static/img/avatars/defaultUserProfilePic.png" + user_dict = { + "name": user.name, + "profil_picture": profil_picture, + "account_type": user.account_type, + } + return jsonify(user_dict) + + +@users_bp.route("/is_admin", methods=["GET"]) +def is_admin(): + authorization = request.headers.get("Authorization") + check_authorization(request, authorization) + user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() + if user.account_type == "Admin": + return jsonify(True) + else: + return jsonify(False) + + 
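The routes above share one auth scheme: /login registers f"Bearer {uuid4()}" in all_auth_tokens and returns the bare token, and protected routes expect that same value back in the Authorization header. A minimal client-side sketch using requests; the base URL and credentials are made-up:

    import requests

    BASE_URL = "http://localhost:8500"  # hypothetical Chocolate server address

    # /login returns the bare token in its JSON response
    resp = requests.post(f"{BASE_URL}/login", json={"name": "admin", "password": "secret"})
    token = resp.json()["token"]

    # protected routes look up the full "Bearer <token>" string in all_auth_tokens
    headers = {"Authorization": f"Bearer {token}"}
    print(requests.get(f"{BASE_URL}/is_admin", headers=headers).json())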
+@users_bp.route("/invite_exist/", methods=["GET"]) +def invite_exist(hash): + can = InviteCodes.query.filter_by(code=hash).first() is not None + return jsonify(can) + + +@users_bp.route("/create_invite", methods=["POST"]) +def create_invite(): + authorization = request.headers.get("Authorization") + check_authorization(request, authorization) + user = Users.query.filter_by(name=all_auth_tokens[authorization]["user"]).first() + + if user.account_type != "Admin": + abort(401, "Unauthorized") + + body = request.get_json() + code = body["code"] + new_invite = InviteCodes(code=code) + DB.session.add(new_invite) + DB.session.commit() + return jsonify({"code": code}) diff --git a/src/chocolate_app/scans.py b/src/chocolate_app/scans.py index 6df3f14..c6c0ab8 100644 --- a/src/chocolate_app/scans.py +++ b/src/chocolate_app/scans.py @@ -1,2443 +1,2473 @@ -import deezer -import requests -import os -import rarfile -import zipfile -import zlib -import ast -import datetime -import sqlalchemy -import re -import subprocess -import io -import uuid -import fitz - -from guessit import guessit -from Levenshtein import distance as lev -from tmdbv3api import TV, Episode, Movie, Person, Search, Group -from tmdbv3api.as_obj import AsObj -from tmdbv3api.exceptions import TMDbException -from PIL import Image -from tinytag import TinyTag -from deep_translator import GoogleTranslator - -from . import DB, get_dir_path, config, IMAGES_PATH -from .tables import ( - Libraries, - Movies, - Series, - Artists, - Albums, - Tracks, - Episodes, - Seasons, - Actors, - Games, - OthersVideos, - Books, -) - - -from .utils.utils import path_join - -dir_path = get_dir_path() - -deezer = deezer.Client() - -genre_list = { - 12: "Aventure", - 14: "Fantastique", - 16: "Animation", - 18: "Drama", - 27: "Horreur", - 28: "Action", - 35: "Comédie", - 36: "Histoire", - 37: "Western", - 53: "Thriller", - 80: "Crime", - 99: "Documentaire", - 878: "Science-fiction", - 9648: "Mystère", - 10402: "Musique", - 10749: "Romance", - 10751: "Famille", - 10752: "War", - 10759: "Action & Adventure", - 10762: "Kids", - 10763: "News", - 10764: "Reality", - 10765: "Sci-Fi & Fantasy", - 10766: "Soap", - 10767: "Talk", - 10768: "War & Politics", - 10769: "Western", - 10770: "TV Movie", -} - -websites_trailers = { - "YouTube": "https://www.youtube.com/embed/", - "Dailymotion": "https://www.dailymotion.com/video/", - "Vimeo": "https://vimeo.com/", -} - - -def transformToDict(obj): - if isinstance(obj, list): - return obj - if isinstance(obj, AsObj): - obj = str(obj) - obj = ast.literal_eval(obj) - return obj - return obj - - -def transformToList(obj): - if isinstance(obj, AsObj): - return list(obj) - if isinstance(obj, list): - return obj - return obj.replace('"', '\\"') - - -def length_video(path: str) -> float: - seconds = subprocess.run( - [ - "ffprobe", - "-v", - "error", - "-show_entries", - "format=duration", - "-of", - "default=noprint_wrappers=1:nokey=1", - path, - ], - stdout=subprocess.PIPE, - text=True, - ) - try: - return float(seconds.stdout) - except Exception: - return 0 - - -def createArtist(artistName, lib): - exists = Artists.query.filter_by(name=artistName).first() is not None - if exists: - return Artists.query.filter_by(name=artistName).first().id - - artists = deezer.search_artists(artistName) - artist = artists[0] - artist_id = artist.id - - exists = Artists.query.filter_by(id=artist_id).first() is not None - if exists: - return artist_id - - cover = artist.picture_big - - path = f"{IMAGES_PATH}/Artist_{artist_id}.png" - # Récupération de 
l'image, conversion en webp et sauvegarde dans le dossier static au nom "Artist_{artist_id}.webp" - with open(path, "wb") as f: - f.write(requests.get(cover).content) - - try: - img = Image.open(path) - img = img.save(f"{IMAGES_PATH}/Artist_{artist_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Artist_{artist_id}.png") - path = f"{IMAGES_PATH}/Artist_{artist_id}.webp" - except Exception: - pass - - path = path.replace(dir_path, "") - - artist = Artists(id=artist_id, name=artistName, cover=path, library_name=lib) - DB.session.add(artist) - DB.session.commit() - - return artist_id - - -def createAlbum(name, artist_id, tracks=[], library=""): - exists = ( - Albums.query.filter_by(dir_name=name, artist_id=artist_id).first() is not None - ) - if exists: - Albums.query.filter_by( - dir_name=name, artist_id=artist_id - ).first().tracks = ",".join(tracks) - DB.session.commit() - return Albums.query.filter_by(dir_name=name, artist_id=artist_id).first().id - - albums = deezer.search_albums( - f"{Artists.query.filter_by(id=artist_id).first().name} - {name}" - ) - - # pour chaque album trouvé, on vérifie si le nom de est proche du nom de l'album qu'on cherche - if len(albums) == 0: - return None - best_match = albums[0] - - for album in albums: - if lev(name, album.title) < lev(name, best_match.title): - best_match = album - elif lev(name, album.title) == lev(name, best_match.title): - best_match = best_match - if lev(name, best_match.title) == 0: - break - - album = best_match - - album_id = album.id - exist = Albums.query.filter_by(id=album_id, artist_id=artist_id).first() is not None - if exist: - return album_id - album_name = album.title - cover = album.cover_big - - path = f"{IMAGES_PATH}/Album_{album_id}.png" - - with open(path, "wb") as f: - f.write(requests.get(cover).content) - - try: - img = Image.open(path) - img = img.save(f"{IMAGES_PATH}/Album_{album_id}.webp", "webp") - os.remove(path) - path = f"{IMAGES_PATH}/Album_{album_id}.webp" - except Exception: - pass - - path = path.replace(dir_path, "") - - tracks = ",".join(tracks) - - album = Albums( - id=album_id, - name=album_name, - dir_name=name, - artist_id=artist_id, - cover=path, - tracks=tracks, - library_name=library, - ) - DB.session.add(album) - DB.session.commit() - - return album_id - - -def getAlbumImage(album_name, path): - albums = deezer.search_albums(album_name) - album = albums[0] - album_name = album.title - cover = album.cover_big - - # Récupération de l'image, conversion en webp et sauvegarde dans le dossier static au nom "Album_{album_id}.webp" - with open(path, "wb") as f: - f.write(requests.get(cover).content) - - try: - img = Image.open(path) - img = img.save(path, "webp") - except Exception: - pass - - return path - - -def getArtistImage(artist_name, path): - artist = deezer.search_artists(artist_name)[0] - artist_name = artist.name - cover = artist.picture_big - - with open(path, "wb") as f: - f.write(requests.get(cover).content) - - try: - img = Image.open(path) - img = img.save(path, "webp") - except Exception: - pass - - return path - - -def generateImage(title, librairie, banner): - from PIL import Image, ImageDraw, ImageFont - - largeur = 1280 - hauteur = 720 - image = Image.new("RGB", (largeur, hauteur), color="#1d1d1d") - - # Ajouter les textes au centre de l'image - draw = ImageDraw.Draw(image) - - # Charger la police Poppins - font_path = f"{dir_path}/static/fonts/Poppins-Medium.ttf" - font_title = ImageFont.truetype(font_path, size=70) - font_librairie = ImageFont.truetype(font_path, size=50) - - # 
Positionner les textes au centre de l'image - titre_larg, titre_haut = draw.textsize(title, font=font_title) - librairie_larg, librairie_haut = draw.textsize(librairie, font=font_librairie) - x_title = int((largeur - titre_larg) / 2) - y_title = int((hauteur - titre_haut - librairie_haut - 50) / 2) - x_librairie = int((largeur - librairie_larg) / 2) - y_librairie = y_title + titre_haut + 50 - - # Ajouter le texte du titre - draw.text((x_title, y_title), title, font=font_title, fill="white", align="center") - - # Ajouter le texte de la librairie - draw.text( - (x_librairie, y_librairie), - librairie, - font=font_librairie, - fill="white", - align="center", - ) - - # Enregistrer l'image - os.remove(banner) - image.save(banner, "webp") - - -def is_connected(): - try: - requests.get("https://ww.google.com/").status_code - return True - except Exception: - return False - - -def printLoading(filesList, actualFile, title): - terminal_size = os.get_terminal_size().columns - 1 - try: - index = filesList.index(actualFile) + 1 - except Exception: - index = 0 - percentage = index * 100 / len(filesList) - - loading_first_part = ("•" * int(percentage * 0.2))[:-1] - loading_first_part = f"{loading_first_part}➤" - loading_second_part = "•" * (20 - int(percentage * 0.2)) - - loading = f"{str(int(percentage)).rjust(3)}% | [\33[32m{loading_first_part}\33[31m{loading_second_part}\33[0m] | {title} | {index}/{len(filesList)}" - loading2 = loading + " " * (terminal_size - len(loading)) - - if len(loading2) > terminal_size: - loading2 = loading2[: terminal_size - 3] + "..." - - print("\033[?25l", end="") - print(loading2, end="\r", flush=True) - - -def searchGame(game, console): - url = f"https://www.igdb.com/search_autocomplete_all?q={game.replace(' ', '%20')}" - return IGDBRequest(url, console) - - -def translate(string): - language = config["ChocolateSettings"]["language"] - if language == "EN": - return string - translated = GoogleTranslator(source="english", target=language.lower()).translate( - string - ) - return translated - - -def IGDBRequest(url, console): - custom_headers = { - "User-Agent": "Mozilla/5.0 (X11; UwUntu; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0", - "Accept": "*/*", - "X-Requested-With": "XMLHttpRequest", - "Origin": url, - "DNT": "1", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "Referer": url, - "Connection": "keep-alive", - "Pragma": "no-cache", - "Cache-Control": "no-cache", - } - response = requests.request("GET", url, headers=custom_headers) - - client_id = config.get("APIKeys", "IGDBID") - client_secret = config.get("APIKeys", "IGDBSECRET") - - if response.status_code == 200 and client_id and client_secret: - grant_type = "client_credentials" - get_access_token = f"https://id.twitch.tv/oauth2/token?client_id={client_id}&client_secret={client_secret}&grant_type={grant_type}" - token = requests.request("POST", get_access_token) - token = token.json() - if "message" in token and token["message"] == "invalid client secret": - print("Invalid client secret") - return None - if "access_token" not in token: - return None - token = token["access_token"] - - headers = { - "Accept": "application/json", - "Authorization": f"Bearer {token}", - "Client-ID": client_id, - } - - games = response.json()["game_suggest"] - - for i in games: - game = i - game_id = game["id"] - url = "https://api.igdb.com/v4/games" - body = f"fields name, cover.*, summary, total_rating, first_release_date, genres.*, platforms.*; where id = {game_id};" - 
response = requests.request("POST", url, headers=headers, data=body) - if len(response.json()) == 0: - break - game = response.json()[0] - if "platforms" in game: - game_platforms = game["platforms"] - try: - platforms = [] - - for i in game_platforms: - if "abbreviation" not in i: - platforms.append(i["alternative_name"]) - else: - platforms.append(i["abbreviation"]) - - real_console_name = { - "GB": "Game Boy", - "GBA": "Game Boy Advance", - "GBC": "Game Boy Color", - "N64": "Nintendo 64", - "NES": "Nintendo Entertainment System", - "NDS": "Nintendo DS", - "SNES": "Super Nintendo Entertainment System", - "Sega Master System": "Sega Master System", - "Sega Mega Drive": "Sega Mega Drive", - "PS1": "PS1", - } - - if ( - real_console_name[console] not in platforms - and console not in platforms - ): - continue - if "total_rating" not in game: - game["total_rating"] = "Unknown" - if "genres" not in game: - game["genres"] = [{"name": "Unknown"}] - if "summary" not in game: - game["summary"] = "Unknown" - if "first_release_date" not in game: - game["first_release_date"] = "Unknown" - if "cover" not in game: - game["cover"] = { - "url": "//images.igdb.com/igdb/image/upload/t_cover_big/nocover.png" - } - - game["summary"] = translate(game["summary"]) - game["genres"][0]["name"] = translate(game["genres"][0]["name"]) - - genres = [] - for genre in game["genres"]: - genres.append(genre["name"]) - genres = ", ".join(genres) - - game_data = { - "title": game["name"], - "cover": game["cover"]["url"].replace("//", "https://"), - "description": game["summary"], - "note": game["total_rating"], - "date": game["first_release_date"], - "genre": genres, - "id": game["id"], - } - return game_data - except Exception: - continue - return None - - -def getMovies(library_name): - all_movies_not_sorted = [] - path = Libraries.query.filter_by(lib_name=library_name).first().lib_folder - film_file_list = [] - try: - movie_files = os.listdir(path) - except Exception: - return - for movie_file in movie_files: - if not movie_file.endswith((".rar", ".zip", ".part")): - film_file_list.append(movie_file) - - if not is_connected(): - return - - film_file_list.sort() - movie = Movie() - - for searchedFilm in film_file_list: - movieTitle = searchedFilm - if os.path.isdir(path_join(path, searchedFilm)): - the_path = path_join(path, searchedFilm) - searchedFilm = path_join(searchedFilm, os.listdir(the_path)[0]) - else: - movieTitle, extension = os.path.splitext(movieTitle) - originalMovieTitle = movieTitle - - printLoading(film_file_list, searchedFilm, movieTitle) - - slug = searchedFilm - video_path = f"{path}/{slug}" - exists = Movies.query.filter_by(slug=video_path).first() is not None - - if not exists: - guessedData = guessit(originalMovieTitle) - guessedTitle = "" - year = None - if "title" not in guessedData: - guessedTitle = originalMovieTitle - else: - guessedTitle = guessedData["title"] - if "episode" in guessedData: - guessedTitle = f"{guessedData['episode']} {guessedTitle}" - if "alternative_title" in guessedData: - guessedTitle = ( - f"{guessedData['alternative_title']} - {guessedTitle}" - ) - if "part" in guessedData: - guessedTitle = f"{guessedTitle} Part {guessedData['part']}" - if "year" in guessedData: - year = guessedData["year"] - - try: - search = Search().movies(guessedTitle, year=year, adult=True) - except Exception: - search = Search().movies(guessedTitle, year=year) - - search = transformToDict(search) - if not search or not search["results"]: - all_movies_not_sorted.append(originalMovieTitle) - 
continue - - search = search["results"] - bestMatch = search[0] - if ( - config["ChocolateSettings"]["askwhichmovie"] == "false" - or len(search) == 1 - ): - for i in range(len(search)): - if ( - lev(guessedTitle, search[i]["title"]) - < lev(guessedTitle, bestMatch["title"]) - and bestMatch["title"] not in film_file_list - ): - bestMatch = search[i] - elif ( - lev(guessedTitle, search[i]["title"]) - == lev(guessedTitle, bestMatch["title"]) - and bestMatch["title"] not in film_file_list - ): - bestMatch = bestMatch - if ( - lev(guessedTitle, bestMatch["title"]) == 0 - and bestMatch["title"] not in film_file_list - ): - break - - res = bestMatch - try: - name = res["title"] - except AttributeError: - name = res["original_title"] - movie_id = res["id"] - details = movie.details(movie_id) - - movieCoverPath = f"https://image.tmdb.org/t/p/original{res['poster_path']}" - banner = f"https://image.tmdb.org/t/p/original{res['backdrop_path']}" - real_title, extension = os.path.splitext(originalMovieTitle) - - with open(f"{IMAGES_PATH}/{movie_id}_Cover.png", "wb") as f: - f.write(requests.get(movieCoverPath).content) - try: - img = Image.open(f"{IMAGES_PATH}/{movie_id}_Cover.png") - img.save(f"{IMAGES_PATH}/{movie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{movie_id}_Cover.png") - movieCoverPath = f"{IMAGES_PATH}/{movie_id}_Cover.webp" - except Exception: - try: - os.rename( - f"{IMAGES_PATH}/{movie_id}_Cover.png", - f"{IMAGES_PATH}/{movie_id}_Cover.webp", - ) - movieCoverPath = "/static/img/broken.webp" - except Exception: - os.remove(f"{IMAGES_PATH}/{movie_id}_Cover.webp") - os.rename( - f"{IMAGES_PATH}/{movie_id}_Cover.png", - f"{IMAGES_PATH}/{movie_id}_Cover.webp", - ) - movieCoverPath = f"{IMAGES_PATH}/{movie_id}_Cover.webp" - with open(f"{IMAGES_PATH}/{movie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - if not res["backdrop_path"]: - banner = f"https://image.tmdb.org/t/p/original{details.backdrop_path}" - if banner != "https://image.tmdb.org/t/p/originalNone": - with open(f"{IMAGES_PATH}/{movie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - else: - banner = "/static/img/broken.webp" - try: - img = Image.open(f"{IMAGES_PATH}/{movie_id}_Banner.png") - img.save(f"{IMAGES_PATH}/{movie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{movie_id}_Banner.png") - banner = f"{IMAGES_PATH}/{movie_id}_Banner.webp" - except Exception: - banner = "/static/img/brokenBanner.webp" - - description = res["overview"] - note = res["vote_average"] - try: - date = res["release_date"] - except AttributeError: - date = "Unknown" - casts = list(details.casts.cast)[:5] - theCast = [] - for cast in casts: - - actor_id = cast.id - actorImage = f"https://www.themovieDB.org/t/p/w600_and_h900_bestv2{cast.profile_path}" - if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - f.write(requests.get(actorImage).content) - try: - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - except Exception: - os.rename( - f"{IMAGES_PATH}/Actor_{actor_id}.png", - f"{IMAGES_PATH}/Actor_{actor_id}.webp", - ) - - actorImage = f"{IMAGES_PATH}/Actor_{actor_id}.webp" - if actor_id not in theCast: - theCast.append(actor_id) - else: - break - person = Person() - p = person.details(cast.id) - exists = Actors.query.filter_by(actor_id=cast.id).first() is not None - if not exists: - actor = Actors( 
- name=cast.name, - actor_image=actorImage, - actor_description=p.biography, - actor_birth_date=p.birthday, - actor_birth_place=p.place_of_birth, - actor_programs=f"{movie_id}", - actor_id=cast.id, - ) - DB.session.add(actor) - DB.session.commit() - else: - actor = Actors.query.filter_by(actor_id=cast.id).first() - actor.actor_programs = f"{actor.actor_programs} {movie_id}" - DB.session.commit() - theCast = ",".join([str(i) for i in theCast]) - try: - date = datetime.datetime.strptime(date, "%Y-%m-%d").strftime("%d/%m/%Y") - except ValueError: - date = "Unknown" - except UnboundLocalError: - date = "Unknown" - - genre = res["genre_ids"] - try: - length = length_video(video_path) - length = str(datetime.timedelta(seconds=length)) - length = length.split(":") - except Exception: - length = [] - - if len(length) == 3: - hours = length[0] - minutes = length[1] - seconds = str(round(float(length[2]))) - if int(seconds) < 10: - seconds = f"0{seconds}" - length = f"{hours}:{minutes}:{seconds}" - elif len(length) == 2: - minutes = length[0] - seconds = str(round(float(length[1]))) - if int(seconds) < 10: - seconds = f"0{seconds}" - length = f"{minutes}:{seconds}" - elif len(length) == 1: - seconds = str(round(float(length[0]))) - if int(seconds) < 10: - seconds = f"0{seconds}" - length = f"00:{seconds}" - else: - length = "0" - - duration = length - - movieGenre = [] - for genre_id in genre: - movieGenre.append(genre_list[genre_id]) - movieGenre = ",".join(movieGenre) - - bandeAnnonce = details.videos.results - bande_annonce_url = "" - if len(bandeAnnonce) > 0: - for video in bandeAnnonce: - bandeAnnonceType = video.type - bandeAnnonceHost = video.site - bandeAnnonceKey = video.key - if bandeAnnonceType == "Trailer": - try: - bande_annonce_url = ( - websites_trailers[bandeAnnonceHost] + bandeAnnonceKey - ) - break - except KeyError: - bande_annonce_url = "Unknown" - - alternatives_names = [] - actualTitle = movieTitle - characters = [" ", "-", "_", ":", ".", ",", "!", "'", "`", '"'] - empty = "" - for character in characters: - for character2 in characters: - if character != character2: - stringTest = actualTitle.replace(character, character2) - alternatives_names.append(stringTest) - stringTest = actualTitle.replace(character2, character) - alternatives_names.append(stringTest) - stringTest = actualTitle.replace(character, empty) - alternatives_names.append(stringTest) - stringTest = actualTitle.replace(character2, empty) - alternatives_names.append(stringTest) - - officialAlternativeNames = movie.alternative_titles( - movie_id=movie_id - ).titles - if officialAlternativeNames is not None: - for officialAlternativeName in officialAlternativeNames: - alternatives_names.append(officialAlternativeName.title) - - alternatives_names = list(dict.fromkeys(alternatives_names)) - - alternatives_names = ",".join(alternatives_names) - filmData = Movies( - id=movie_id, - title=movieTitle, - real_title=name, - cover=movieCoverPath, - banner=banner, - slug=video_path, - description=description, - note=note, - date=date, - genre=movieGenre, - duration=str(duration), - cast=theCast, - bande_annonce_url=bande_annonce_url, - adult=str(res["adult"]), - library_name=library_name, - alternatives_names=alternatives_names, - file_date=os.path.getmtime(video_path), - ) - DB.session.add(filmData) - DB.session.commit() - - movie_files = Movies.query.filter_by(library_name=library_name).all() - for movie in movie_files: - slug = movie.slug - if not os.path.exists(slug): - DB.session.delete(movie) - DB.session.commit() - - 
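Both the movie scanner above and the series scanner below pick their TMDB result the same way: keep the candidate whose title has the strictly smallest Levenshtein distance to the guessed title, and stop early on an exact match. A minimal standalone sketch of that selection rule (the helper name is illustrative, not part of the patch), using the same `Levenshtein.distance` import aliased as `lev` that this module uses:

    from Levenshtein import distance as lev

    def best_title_match(guessed_title, results, already_imported=()):
        # Keep the candidate strictly closer (by edit distance) to the guessed
        # title; skip titles that were already imported from the same folder.
        best = results[0]
        for candidate in results:
            if candidate["title"] in already_imported:
                continue
            if lev(guessed_title, candidate["title"]) < lev(guessed_title, best["title"]):
                best = candidate
            if lev(guessed_title, best["title"]) == 0:
                break  # exact match, nothing closer exists
        return best

    # e.g. best_title_match("Inceptio", [{"title": "Inside Out"}, {"title": "Inception"}])
    # returns {"title": "Inception"}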
-def getSeries(library_name): - allSeriesPath = Libraries.query.filter_by(lib_name=library_name).first().lib_folder - allSeries = os.listdir(allSeriesPath) - allSeriesName = [] - for dir in allSeries: - if os.path.isdir(f"{allSeriesPath}/{dir}"): - allSeriesName.append(f"{allSeriesPath}/{dir}") - - if not is_connected(): - return - - show = TV() - - for serie in allSeriesName: - if not isinstance(serie, str): - continue - - printLoading(allSeriesName, serie, serie) - - seriePath = serie - serieTitle = serie.split("/")[-1] - originalSerieTitle = serieTitle - try: - serie_modified_time = os.path.getmtime(seriePath) - except FileNotFoundError: - print(f"Cant find {originalSerieTitle}") - continue - - serie_guess = guessit(originalSerieTitle) - if "title" in serie_guess: - serieTitle = serie_guess["title"] - - if "alternative_title" in serie_guess: - serieTitle = f"{serieTitle} - {serie_guess['alternative_title']}" - - try: - if "year" in serie_guess: - search = Search().tv_shows(serieTitle, release_year=serie_guess["year"]) - else: - search = Search().tv_shows(serieTitle) - except TMDbException: - break - - search = search.results - search = transformToDict(search) - - if search == {}: - continue - - askForGoodSerie = config["ChocolateSettings"]["askWhichSerie"] - bestMatch = search[0] - if askForGoodSerie == "false" or len(search) == 1: - for i in range(len(search)): - if ( - lev(serieTitle, search[i]["name"]) - < lev(serieTitle, bestMatch["name"]) - and bestMatch["name"] not in allSeriesName - ): - bestMatch = search[i] - elif ( - lev(serieTitle, search[i]["name"]) - == lev(serieTitle, bestMatch["name"]) - and bestMatch["name"] not in allSeriesName - ): - bestMatch = bestMatch - if ( - lev(serieTitle, bestMatch["name"]) == 0 - and bestMatch["name"] not in allSeriesName - ): - break - - res = bestMatch - serie_id = str(res["id"]) - - if ( - DB.session.query(Series).filter_by(original_name=serieTitle).first() - is not None - ): - serie_id = ( - DB.session.query(Series).filter_by(original_name=serieTitle).first().id - ) - - exists = DB.session.query(Series).filter_by(id=serie_id).first() is not None - - details = show.details(serie_id) - defaultNbOfSeasons = details.number_of_seasons - defaultNbOfEpisodes = details.number_of_episodes - seasonsInfo = details.seasons - - seasonsNumber = [] - seasons = os.listdir(seriePath) - for season in seasons: - if os.path.isdir(f"{seriePath}/{season}") and season != "": - season = re.sub(r"\D", "", season) - if season == "": - continue - seasonsNumber.append(int(season)) - - episodes = [] - for season in seasons: - allEpisodes = os.listdir(f"{seriePath}/{season}") - for episode in allEpisodes: - if os.path.isfile( - f"{seriePath}/{season}/{episode}" - ): - episodes.append(episode) - - nbEpisodes = len(episodes) - nbSeasons = len(seasons) - - episodeGroups = show.episode_groups(serie_id).results - # print(f"Pour {serie_name} : nbEpisodes: {nbEpisodes} nbSeasons: {nbSeasons} defaultNbOfEpisodes: {defaultNbOfEpisodes} defaultNbOfSeasons: {defaultNbOfSeasons}") - - if nbEpisodes <= defaultNbOfEpisodes and nbSeasons <= defaultNbOfSeasons: - pass - elif len(episodeGroups) > 0: - seasonsInfo = None - for group in episodeGroups: - groupNbEpisodes = group.episode_count - groupNbSeasons = group.group_count - - if nbEpisodes >= groupNbEpisodes * 0.95 and nbSeasons == groupNbSeasons: - theGroup = Group() - seasonsInfo = theGroup.details(group.id).groups - for season in seasonsInfo: - season = season.__dict__ - if len(season["episodes"]) > 0: - season["season_number"] = 
season["order"] - season["episode_count"] = len(season["episodes"]) - print(len(season["episodes"])) - season["air_date"] = season["episodes"][0]["air_date"] - season["overview"] = "" - season["poster_path"] = season["episodes"][0]["still_path"] - if seasonsInfo is None: - for group in episodeGroups: - if nbEpisodes <= groupNbEpisodes and nbSeasons <= groupNbSeasons: - groupNbEpisodes = group.episode_count - groupNbSeasons = group.group_count - - if ( - nbEpisodes == groupNbEpisodes - and nbSeasons == groupNbSeasons - ): - theGroup = Group() - seasonsInfo = theGroup.details(group.id).groups - for season in seasonsInfo: - season["season_number"] = season["order"] - season["episode_count"] = len(season["episodes"]) - season["air_date"] = season["episodes"][0]["air_date"] - season["overview"] = "" - season["poster_path"] = season["episodes"][0][ - "still_path" - ] - break - - if seasonsInfo is None: - group = episodeGroups[0] - theGroup = Group() - seasonsInfo = theGroup.details(group.id).groups - for season in seasonsInfo: - season["season_number"] = season["order"] - season["episode_count"] = len(season["episodes"]) - season["air_date"] = season["episodes"][0]["air_date"] - season["overview"] = "" - season["poster_path"] = season["episodes"][0]["still_path"] - - name = res["name"] - if not exists: - cover = f"https://image.tmdb.org/t/p/original{res['poster_path']}" - banner = f"https://image.tmdb.org/t/p/original{res['backdrop_path']}" - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.png"): - with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: - f.write(requests.get(cover).content) - try: - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") - except Exception: - - pass - - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): - with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - try: - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") - except Exception: - - pass - - banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" - cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" - description = res["overview"] - note = res["vote_average"] - date = res["first_air_date"] - cast = details.credits.cast - runTime = details.episode_run_time - duration = "" - for i in range(len(runTime)): - if i != len(runTime) - 1: - duration += f"{str(runTime[i])}:" - else: - duration += f"{str(runTime[i])}" - serieGenre = details.genres - bandeAnnonce = details.videos.results - bande_annonce_url = "" - if len(bandeAnnonce) > 0: - for video in bandeAnnonce: - bandeAnnonceType = video.type - bandeAnnonceHost = video.site - bandeAnnonceKey = video.key - if bandeAnnonceType == "Trailer" or len(bandeAnnonce) == 1: - try: - bande_annonce_url = ( - websites_trailers[bandeAnnonceHost] + bandeAnnonceKey - ) - break - except KeyError: - bande_annonce_url = "Unknown" - - genreList = [] - for genre in serieGenre: - genreList.append(str(genre.name)) - genreList = ",".join(genreList) - newCast = [] - cast = list(cast)[:5] - for actor in cast: - actor_id = actor.id - actorImage = f"https://image.tmdb.org/t/p/original{actor.profile_path}" - image = f"{IMAGES_PATH}/Actor_{actor_id}.png" - if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): - try: - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - 
f.write(requests.get(actorImage).content) - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" - except Exception: - - pass - - actorImage = image - - actor.profile_path = str(actorImage) - newCast.append(actor_id) - - person = Person() - p = person.details(actor.id) - exists = Actors.query.filter_by(actor_id=actor.id).first() is not None - if not exists: - actor = Actors( - name=actor.name, - actor_id=actor.id, - actor_image=actorImage, - actor_description=p.biography, - actor_birth_date=p.birthday, - actor_birth_place=p.place_of_birth, - actor_programs=f"{serie_id}", - ) - DB.session.add(actor) - DB.session.commit() - else: - actor = Actors.query.filter_by(actor_id=actor.id).first() - if serie_id not in actor.actor_programs: - actor.actor_programs = f"{actor.actor_programs} {serie_id}" - DB.session.commit() - - newCast = newCast[:5] - newCast = ",".join([str(i) for i in newCast]) - isAdult = str(details["adult"]) - serieObject = Series( - id=serie_id, - name=name, - original_name=originalSerieTitle, - genre=genreList, - duration=duration, - description=description, - cast=newCast, - bande_annonce_url=bande_annonce_url, - cover=cover, - banner=banner, - note=note, - date=date, - serie_modified_time=serie_modified_time, - adult=isAdult, - library_name=library_name, - ) - DB.session.add(serieObject) - DB.session.commit() - - for season in seasonsInfo: - season = transformToDict(season) - allSeasons = os.listdir(seriePath) - url = None - for season_dir in allSeasons: - season_dir_number = re.sub(r"\D", "", season_dir) - if season_dir_number != "" and int(season_dir_number) == int(season["season_number"]): - url = f"{seriePath}/{season_dir}" - break - if not url: - #print(f"\nCan't find {serieTitle} season {season['season_number']}") - continue - season_dir = url - #print(f"\nSeason {season['season_number']} of {serieTitle} found: {season_dir}") - seasonInDB = Seasons.query.filter_by(season_id=season["id"]).first() - if seasonInDB: - modified_date = seasonInDB.modified_date - try: - actualSeasonModifiedTime = os.path.getmtime(url) - except FileNotFoundError: - continue - if seasonInDB is None or modified_date != actualSeasonModifiedTime: - try: - allEpisodes = [ - f - for f in os.listdir(season_dir) - if os.path.isfile(path_join(season_dir, f)) - ] - except FileNotFoundError: - continue - if seasonInDB: - seasonInDB.modified_date = modified_date - DB.session.commit() - bigSeason = season - releaseDate = season["air_date"] - episodes_number = season["episode_count"] - season_number = season["season_number"] - season_id = season["id"] - season_name = season["name"] - season_description = season["overview"] - seasonPoster = season["poster_path"] - - try: - seasonModifiedTime = os.path.getmtime(season_dir) - savedModifiedTime = ( - Seasons.query.filter_by(season_id=season_id) - .first() - .seasonModifiedTime - ) - except AttributeError: - seasonModifiedTime = os.path.getmtime(season_dir) - - if len(allEpisodes) > 0 or (seasonModifiedTime != savedModifiedTime): - try: - exists = ( - Seasons.query.filter_by(season_id=season_id).first() - is not None - ) - except sqlalchemy.exc.PendingRollbackError: - DB.session.rollback() - exists = ( - Seasons.query.filter_by(season_id=season_id).first() - is not None - ) - # number of episodes in the season - savedModifiedTime = 0 - if not exists or (seasonModifiedTime != savedModifiedTime): - 
season_cover_path = ( - f"https://image.tmdb.org/t/p/original{seasonPoster}" - ) - if not os.path.exists(f"{IMAGES_PATH}/{season_id}_Cover.png"): - try: - with open( - f"{IMAGES_PATH}/{season_id}_Cover.png", "wb" - ) as f: - f.write(requests.get(season_cover_path).content) - img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") - img = img.save( - f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" - ) - os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") - season_cover_path = ( - f"{IMAGES_PATH}/{season_id}_Cover.webp" - ) - except Exception: - try: - with open( - f"{IMAGES_PATH}/{season_id}_Cover.png", "wb" - ) as f: - f.write(requests.get(season_cover_path).content) - img = Image.open( - f"{IMAGES_PATH}/{season_id}_Cover.png" - ) - img = img.save( - f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" - ) - os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") - season_cover_path = ( - f"{IMAGES_PATH}/{season_id}_Cover.webp" - ) - except Exception: - season_cover_path = "/static/img/brokenImage.png" - - allSeasons = os.listdir(seriePath) - - try: - modified_date = os.path.getmtime(season_dir) - except FileNotFoundError: - modified_date = 0 - - allEpisodesInDB = Episodes.query.filter_by( - season_id=season_id - ).all() - allEpisodesInDB = [ - episode.episode_name for episode in allEpisodesInDB - ] - - exists = ( - Seasons.query.filter_by(season_id=season_id).first() is not None - ) - if not exists: - thisSeason = Seasons( - serie=serie_id, - release=releaseDate, - episodes_number=episodes_number, - season_number=season_number, - season_id=season_id, - season_name=season_name, - season_description=season_description, - cover=season_cover_path, - modified_date=modified_date, - number_of_episode_in_folder=len(allEpisodes), - ) - - try: - DB.session.add(thisSeason) - DB.session.commit() - except sqlalchemy.exc.PendingRollbackError: - DB.session.rollback() - DB.session.add(thisSeason) - DB.session.commit() - if len(allEpisodes) != len(allEpisodesInDB): - for episode in allEpisodes: - slug = f"{season_dir}/{episode}" - episodeName = slug.split("/")[-1] - guess = guessit(episodeName) - if "episode" in guess: - episodeIndex = guess["episode"] - elif "episode_title" in guess: - episodeIndex = guess["episode_title"] - elif "season" in guess and len(guess["season"]) == 2: - episodeIndex = guess["season"][1] - elif "season" in guess: - episodeIndex = guess["season"] - elif "title" in guess: - episodeIndex = guess["title"] - - else: - print( - f"Can't find the episode index of {episodeName}, data: {guess}, slug: {slug}" - ) - continue - - if isinstance(episodeIndex, list): - for i in range(len(episodeIndex)): - if isinstance(episodeIndex[i], int): - print(f"Episode index is {episodeIndex}") - episodeIndex[i] = str(episodeIndex[i]) - episodeIndex = "".join(episodeIndex) - - exists = Episodes.query.filter_by(episode_number=int(episodeIndex), season_id=season_id).first() is not None - - if not exists: - #print(f"Episode {episodeIndex} of {serieTitle} for the Season {season_id} not found") - if isinstance(season_id, int) or season_id.isnumeric(): - showEpisode = Episode() - #print(f"Get episodeInfo of : E{episodeIndex} S{season_number} of {serieTitle}") - try: - episodeDetails = showEpisode.details( - serie_id, season_number, episodeIndex - ) - except TMDbException: - #episode does not exist - continue - realEpisodeName = episodeDetails.name - episodeInfo = showEpisode.details( - serie_id, season_number, episodeIndex - ) - episode_id = episodeInfo["id"] - else: - print(f"Get episodeInfo of : E{episodeIndex} 
S{season_number} of {serieTitle}") - episodeInfo = bigSeason["episodes"][ - int(episodeIndex) - 1 - ] - episode_id = episodeInfo["id"] - realEpisodeName = episodeInfo["name"] - - coverEpisode = f"https://image.tmdb.org/t/p/original{episodeInfo['still_path']}" - - if not os.path.exists( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" - ): - with open( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png", - "wb", - ) as f: - f.write(requests.get(coverEpisode).content) - try: - img = Image.open( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" - ) - img = img.save( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp", - "webp", - ) - os.remove( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" - ) - coverEpisode = f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" - except Exception: - coverEpisode = f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" - try: - exists = ( - Episodes.query.filter_by( - episode_id=episode_id - ).first() - is not None - ) - except sqlalchemy.exc.PendingRollbackError: - DB.session.rollback() - exists = ( - Episodes.query.filter_by( - episode_id=episode_id - ).first() - is not None - ) - if not exists: - episodeData = Episodes( - episode_id=episode_id, - episode_name=realEpisodeName, - season_id=season_id, - episode_number=episodeIndex, - episode_description=episodeInfo["overview"], - episode_cover_path=coverEpisode, - release_date=episodeInfo["air_date"], - slug=slug, - intro_start=0.0, - intro_end=0.0, - ) - thisSeason = Seasons.query.filter_by( - season_id=season_id - ).first() - thisSeason.number_of_episode_in_folder += 1 - try: - DB.session.add(episodeData) - DB.session.commit() - except Exception: - DB.session.rollback() - DB.session.add(episodeData) - DB.session.commit() - else: - pass - - allFiles = [ - name - for name in os.listdir(allSeriesPath) - if os.path.isfile(path_join(allSeriesPath, name)) - and not name.endswith((".rar", ".zip", ".part")) - ] - for file in allFiles: - printLoading(allFiles, file, file) - - slug = path_join(allSeriesPath, file) - exists = Episodes.query.filter_by(slug=slug).first() is not None - - if not exists: - guess = guessit(file) - # print(f"\n {guess}") - title = guess["title"] - if "episode" not in guess: - season = guess["season"] - if isinstance(guess["season"], list): - season, episode = guess["season"] - else: - season = guess["season"] - episode = int(guess["episode_title"]) - else: - season = guess["season"] - episode = guess["episode"] - - seasonIndex = season - originalFile = file - episodeIndex = episode - originalSerieTitle = title - serie_modified_time = 0 - series = TV() - show = Search().tv_shows(title) - res = show[0] - serie = res.name - serie_id = res.id - details = series.details(serie_id) - episodeGroups = series.episode_groups(serie_id).results - serieEpisodes = [] - serieSeasons = [] - - for file in allFiles: - guess = guessit(file) - serie = guess["title"] - season = guess["season"] - if isinstance(season, list): - season, episode = guess["season"] - season = int(season) - if serie == originalSerieTitle: - serieEpisodes.append(file) - if season not in serieSeasons: - serieSeasons.append(season) - - file = originalFile - - defaultNbOfSeasons = details.number_of_seasons - defaultNbOfEpisodes = details.number_of_episodes - - nbSeasons = len(serieSeasons) - nbEpisodes = len(serieEpisodes) - - season_api = None - season_id = None - - if nbEpisodes <= defaultNbOfEpisodes and nbSeasons <= defaultNbOfSeasons: - for seasontmdb in details.seasons: - if str(seasontmdb.season_number) == 
str(seasonIndex): - season_id = seasontmdb.id - season_api = seasontmdb - break - elif len(episodeGroups) > 0: - for group in episodeGroups: - groupNbEpisodes = group.episode_count - groupNbSeasons = group.group_count - if nbEpisodes <= groupNbEpisodes and nbSeasons <= groupNbSeasons: - theGroup = Group() - seasonsInfo = theGroup.details(group.id).groups - for season in seasonsInfo: - season["season_number"] = season["order"] - season["episode_count"] = len(season["episodes"]) - season["air_date"] = season["episodes"][0]["air_date"] - season["overview"] = "" - season["poster_path"] = season["episodes"][0]["still_path"] - - season_api = seasonsInfo[seasonIndex - 1] - season_id = season_api["id"] - else: - for seasontmdb in details.seasons: - if str(seasontmdb.season_number) == str(seasonIndex): - season_id = seasontmdb.id - season_api = seasontmdb - break - - serieExists = Series.query.filter_by(id=serie_id).first() is not None - if not serieExists: - name = res.name - cover = f"https://image.tmdb.org/t/p/original{res.poster_path}" - banner = f"https://image.tmdb.org/t/p/original{res.backdrop_path}" - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.png"): - with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: - f.write(requests.get(cover).content) - try: - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") - except Exception as e: - print(f"Error with the image of the {serie}:\n{e}") - pass - - new_banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" - if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): - with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: - f.write(requests.get(banner).content) - - if os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): - img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") - img = img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") - os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") - else: - new_banner = f"{IMAGES_PATH}/{serie_id}_Banner.png" - - cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" - description = res["overview"] - note = res.vote_average - date = res.first_air_date - cast = details.credits.cast - runTime = details.episode_run_time - duration = "" - for i in range(len(runTime)): - if i != len(runTime) - 1: - duration += f"{str(runTime[i])}:" - else: - duration += f"{str(runTime[i])}" - serieGenre = details.genres - bandeAnnonce = details.videos.results - bande_annonce_url = "" - if len(bandeAnnonce) > 0: - for video in bandeAnnonce: - bandeAnnonceType = video.type - bandeAnnonceHost = video.site - bandeAnnonceKey = video.key - if bandeAnnonceType == "Trailer" or len(bandeAnnonce) == 1: - try: - bande_annonce_url = ( - websites_trailers[bandeAnnonceHost] - + bandeAnnonceKey - ) - break - except KeyError: - bande_annonce_url = "Unknown" - - genreList = [] - for genre in serieGenre: - genreList.append(str(genre.name)) - genreList = ",".join(genreList) - newCast = [] - cast = list(cast)[:5] - for actor in cast: - actor_id = actor.id - actorImage = ( - f"https://image.tmdb.org/t/p/original{actor.profile_path}" - ) - if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): - with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: - f.write(requests.get(actorImage).content) - img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") - img = img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") - os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") - - actorImage = 
f"{IMAGES_PATH}/Actor_{actor_id}.webp" - actor.profile_path = str(actorImage) - thisActor = actor_id - newCast.append(thisActor) - - person = Person() - p = person.details(actor.id) - exists = ( - Actors.query.filter_by(actor_id=actor.id).first() is not None - ) - if not exists: - actor = Actors( - name=actor.name, - actor_id=actor.id, - actor_image=actorImage, - actor_description=p.biography, - actor_birth_date=p.birthday, - actor_birth_place=p.place_of_birth, - actor_programs=f"{serie_id}", - ) - DB.session.add(actor) - DB.session.commit() - else: - actor = Actors.query.filter_by(actor_id=actor.id).first() - actor.actor_programs = f"{actor.actor_programs} {serie_id}" - DB.session.commit() - - newCast = newCast[:5] - newCast = ",".join([str(i) for i in newCast]) - isAdult = str(details["adult"]) - serieObject = Series( - id=serie_id, - name=name, - original_name=originalSerieTitle, - genre=genreList, - duration=duration, - description=description, - cast=newCast, - bande_annonce_url=bande_annonce_url, - cover=cover, - banner=new_banner, - note=note, - date=date, - serie_modified_time=serie_modified_time, - adult=isAdult, - library_name=library_name, - ) - DB.session.add(serieObject) - DB.session.commit() - - # print(f"Pour {file}, serie_id = {serie_id} et season_id = {season_id}") - - seasonExists = ( - Seasons.query.filter_by(serie=serie_id, season_id=season_id).first() - is not None - ) - - if season_api and not seasonExists: - season = season_api - releaseDate = season.air_date - episodes_number = season.episode_count - season_number = season.season_number - season_name = season.name - season_description = season.overview - seasonPoster = season.poster_path - - savedModifiedTime = 0 - - season_cover_path = f"https://image.tmdb.org/t/p/original{seasonPoster}" - if not os.path.exists(f"{IMAGES_PATH}/{season_id}_Cover.png"): - with open(f"{IMAGES_PATH}/{season_id}_Cover.png", "wb") as f: - f.write(requests.get(season_cover_path).content) - try: - img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") - img = img.save(f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp") - os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") - season_cover_path = f"{IMAGES_PATH}/{season_id}_Cover.webp" - except Exception: - with open(f"{IMAGES_PATH}/{season_id}_Cover.png", "wb") as f: - f.write(requests.get(season_cover_path).content) - try: - img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") - img = img.save( - f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" - ) - os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") - season_cover_path = f"{IMAGES_PATH}/{season_id}_Cover.webp" - except Exception: - season_cover_path = "/static/img/brokenImage.png" - - try: - modified_date = os.path.getmtime(f"{allSeriesPath}{slug}") - except Exception: - modified_date = 0 - - seasonObject = Seasons( - serie=serie_id, - season_id=season_id, - season_name=season_name, - season_description=season_description, - cover=season_cover_path, - season_number=season_number, - episodes_number=episodes_number, - release=releaseDate, - modified_date=modified_date, - number_of_episode_in_folder=0, - ) - - DB.session.add(seasonObject) - DB.session.commit() - - bigSeason = season_api - - showEpisode = Episode() - season_number = seasonIndex - serie_id, season_number, episodeIndex = ( - str(serie_id), - str(season_number), - str(episodeIndex), - ) - - try: - exists = ( - Episodes.query.filter_by( - episode_number=episodeIndex, season_id=season_id - ).first() - is not None - ) - except sqlalchemy.exc.PendingRollbackError: - 
DB.session.rollback() - exists = ( - Episodes.query.filter_by( - episode_number=episodeIndex, season_id=season_id - ).first() - is not None - ) - if not exists: - if isinstance(season_id, int) or season_id.isnumeric(): - showEpisode = Episode() - episodeDetails = showEpisode.details( - serie_id, season_number, episodeIndex - ) - realEpisodeName = episodeDetails.name - episodeInfo = showEpisode.details( - serie_id, season_number, episodeIndex - ) - episode_id = episodeInfo.id - else: - episodeInfo = bigSeason["episodes"][int(episodeIndex) - 1] - episode_id = episodeInfo["id"] - realEpisodeName = episodeInfo["name"] - - coverEpisode = ( - f"https://image.tmdb.org/t/p/original{episodeInfo.still_path}" - ) - - if not os.path.exists( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" - ): - with open( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png", "wb" - ) as f: - f.write(requests.get(coverEpisode).content) - try: - img = Image.open( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" - ) - img = img.save( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp", "webp" - ) - os.remove(f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png") - coverEpisode = ( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" - ) - except Exception: - coverEpisode = ( - f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" - ) - try: - exists = ( - Episodes.query.filter_by(episode_id=episode_id).first() - is not None - ) - except sqlalchemy.exc.PendingRollbackError: - DB.session.rollback() - exists = ( - Episodes.query.filter_by(episode_id=episode_id).first() - is not None - ) - if not exists: - # Mprint(f"Pour le fichier {file}, j'ai trouvé : \n - episode_number: {episodeIndex} \n - season_id: {season_id} \n - Serie: {serie_id} \n - Episode ID: {episode_id}") - - episodeData = Episodes( - episode_id=episode_id, - episode_name=realEpisodeName, - season_id=season_id, - episode_number=episodeIndex, - episode_description=episodeInfo.overview, - episode_cover_path=coverEpisode, - release_date=episodeInfo.air_date, - slug=slug, - intro_start=0.0, - intro_end=0.0, - ) - thisSeason = Seasons.query.filter_by(season_id=season_id).first() - thisSeason.number_of_episode_in_folder += 1 - try: - DB.session.add(episodeData) - DB.session.commit() - except Exception: - DB.session.rollback() - DB.session.add(episodeData) - DB.session.commit() - - allSeriesInDB = Series.query.all() - allSeriesInDB = [ - serie.original_name - for serie in allSeriesInDB - if serie.library_name == library_name - ] - - for serie in allSeriesInDB: - serie_id = Series.query.filter_by(original_name=serie).first().id - allSeasons = Seasons.query.filter_by(serie=serie_id).all() - if serie not in allSeries: - for season in allSeasons: - season_id = season.season_id - allEpisodes = Episodes.query.filter_by(season_id=season_id).all() - for episode in allEpisodes: - if not os.path.exists(episode.slug): - try: - DB.session.delete(episode) - DB.session.commit() - except Exception: - DB.session.rollback() - DB.session.delete(episode) - DB.session.commit() - - for season in allSeasons: - season_id = season.season_id - allEpisodes = Episodes.query.filter_by(season_id=season_id).all() - if len(allEpisodes) == 0: - try: - DB.session.delete(season) - DB.session.commit() - except Exception: - DB.session.rollback() - DB.session.delete(season) - DB.session.commit() - allSeasons = Seasons.query.filter_by(serie=serie_id).all() - if len(allSeasons) == 0: - try: - DB.session.delete(Series.query.filter_by(id=serie_id).first()) - DB.session.commit() - except 
Exception: - DB.session.rollback() - DB.session.delete(Series.query.filter_by(id=serie_id).first()) - DB.session.commit() - - -def getGames(library_name): - allGamesPath = Libraries.query.filter_by(lib_name=library_name).first().lib_folder - try: - allConsoles = [ - name - for name in os.listdir(allGamesPath) - if os.path.isdir(path_join(allGamesPath, name)) - and not name.endswith((".rar", ".zip", ".part")) - ] - except Exception: - return - - for console in allConsoles: - if os.listdir(f"{allGamesPath}/{console}") == []: - allConsoles.remove(console) - saidPS1 = False - supportedConsoles = [ - "3DO", - "Amiga", - "Atari 2600", - "Atari 5200", - "Atari 7800", - "Atari Jaguar", - "Atari Lynx", - "GB", - "GBA", - "GBC", - "N64", - "NDS", - "NES", - "SNES", - "Neo Geo Pocket", - "PSX", - "Sega 32X", - "Sega CD", - "Sega Game Gear", - "Sega Master System", - "Sega Mega Drive", - "Sega Saturn", - "PS1", - ] - supportedFileTypes = [ - ".zip", - ".adf", - ".adz", - ".dms", - ".fdi", - ".ipf", - ".hdf", - ".lha", - ".slave", - ".info", - ".cdd", - ".nrg", - ".mds", - ".chd", - ".uae", - ".m3u", - ".a26", - ".a52", - ".a78", - ".j64", - ".lnx", - ".gb", - ".gba", - ".gbc", - ".n64", - ".nds", - ".nes", - ".ngp", - ".psx", - ".sfc", - ".smc", - ".smd", - ".32x", - ".cd", - ".gg", - ".md", - ".sat", - ".sms", - ] - for console in allConsoles: - if console not in supportedConsoles: - print( - f"{console} is not supported or the console name is not correct, here is the list of supported consoles: \n{', '.join(supportedConsoles)} rename the folder to one of these names if it's the correct console" - ) - break - - printLoading(allConsoles, console, console) - - allFiles = os.listdir(f"{allGamesPath}/{console}") - for file in allFiles: - # get all games in the db - allGamesInDB = Games.query.filter_by( - library_name=library_name, console=console - ).all() - allGamesInDB = [game.slug for game in allGamesInDB] - numberOfGamesInDB = len(allGamesInDB) - numberOfGamesInFolder = len(allFiles) - if numberOfGamesInDB < numberOfGamesInFolder: - gameSlug = f"{allGamesPath}/{console}/{file}" - exists = Games.query.filter_by(slug=gameSlug).first() is not None - if file.endswith(tuple(supportedFileTypes)) and not exists: - newFileName = file - newFileName = re.sub(r"\d{5} - ", "", newFileName) - newFileName = re.sub(r"\d{4} - ", "", newFileName) - newFileName = re.sub(r"\d{3} - ", "", newFileName) - newFileName, extension = os.path.splitext(newFileName) - newFileName = newFileName.rstrip() - newFileName = f"{newFileName}{extension}" - os.rename( - f"{allGamesPath}/{console}/{file}", - f"{allGamesPath}/{console}/{newFileName}", - ) - - printLoading(allFiles, file, newFileName) - - file = newFileName - - file, extension = os.path.splitext(file) - - gameIGDB = searchGame(file, console) - - if gameIGDB is not None and gameIGDB != {} and not exists: - gameName = gameIGDB["title"] - gameCover = gameIGDB["cover"] - gameDescription = gameIGDB["description"] - gameNote = gameIGDB["note"] - gameDate = gameIGDB["date"] - gameGenre = gameIGDB["genre"] - game_id = gameIGDB["id"] - else: - gameName = file - gameCover = "/static/img/broken.webp" - gameDescription = "" - gameNote = 0 - gameDate = "" - gameGenre = "" - game_id = str(uuid.uuid4()) - - gameRealTitle = newFileName - gameConsole = console - - game = Games( - console=gameConsole, - id=game_id, - title=gameName, - real_title=gameRealTitle, - cover=gameCover, - description=gameDescription, - note=gameNote, - date=gameDate, - genre=gameGenre, - slug=gameSlug, - 
library_name=library_name, - ) - DB.session.add(game) - DB.session.commit() - - elif console == "PS1" and file.endswith(".cue") and not exists: - if not saidPS1: - print( - "You need to zip all our .bin files and the .cue file in one .zip file to being able to play it" - ) - saidPS1 = True - - value = config["ChocolateSettings"]["compressPS1Games"] - if value.lower() == "true": - index = allFiles.index(file) - 1 - - allBins = [] - while allFiles[index].endswith(".bin"): - allBins.append(allFiles[index]) - index -= 1 - - fileName, extension = os.path.splitext(file) - with zipfile.ZipFile( - f"{allGamesPath}/{console}/{fileName}.zip", "w" - ) as zipObj: - for binFiles in allBins: - zipObj.write( - f"{allGamesPath}/{console}/{binFiles}", binFiles - ) - zipObj.write(f"{allGamesPath}/{console}/{file}", file) - for binFiles in allBins: - os.remove(f"{allGamesPath}/{console}/{binFiles}") - os.remove(f"{allGamesPath}/{console}/{file}") - file = f"{fileName}.zip" - newFileName = file - newFileName = re.sub(r"\d{5} - ", "", newFileName) - newFileName = re.sub(r"\d{4} - ", "", newFileName) - newFileName = re.sub(r"\d{3} - ", "", newFileName) - newFileName, extension = os.path.splitext(newFileName) - newFileName = newFileName.rstrip() - newFileName = f"{newFileName}{extension}" - os.rename( - f"{allGamesPath}/{console}/{file}", - f"{allGamesPath}/{console}/{newFileName}", - ) - file = newFileName - while ".." in newFileName: - newFileName = newFileName.replace("..", ".") - try: - os.rename( - f"{allGamesPath}/{console}/{file}", - f"{allGamesPath}/{console}/{newFileName}", - ) - except FileExistsError: - os.remove(f"{allGamesPath}/{console}/{file}") - file, extension = os.path.splitext(file) - - gameIGDB = searchGame(file, console) - if gameIGDB is not None and gameIGDB != {}: - gameName = gameIGDB["title"] - gameRealTitle = newFileName - gameCover = gameIGDB["cover"] - - with open( - f"{allGamesPath}/{console}/{gameRealTitle}.png", "wb" - ) as f: - f.write(requests.get(gameCover).content) - gameCover = f"{allGamesPath}/{console}/{gameRealTitle}.png" - img = Image.open(gameCover) - img = img.save( - f"{allGamesPath}/{console}/{gameRealTitle}.webp", "webp" - ) - os.remove(gameCover) - gameCover = f"{allGamesPath}/{console}/{gameRealTitle}.webp" - - gameDescription = gameIGDB["description"] - gameNote = gameIGDB["note"] - gameDate = gameIGDB["date"] - gameGenre = gameIGDB["genre"] - game_id = gameIGDB["id"] - gameConsole = console - gameSlug = f"{allGamesPath}/{console}/{newFileName}" - game = Games.query.filter_by(slug=gameSlug).first() - print(game) - if not game: - game = Games( - console=gameConsole, - id=game_id, - title=gameName, - real_title=gameRealTitle, - cover=gameCover, - description=gameDescription, - note=gameNote, - date=gameDate, - genre=gameGenre, - slug=gameSlug, - ) - DB.session.add(game) - DB.session.commit() - elif not file.endswith(".bin") and not exists: - print( - f"{file} is not supported, here's the list of supported files : \n{','.join(supportedFileTypes)}" - ) - gamesInDb = Games.query.filter_by(console=console).all() - gamesInDb = [game.real_title for game in gamesInDb] - for game in gamesInDb: - if game not in allFiles: - game = Games.query.filter_by(console=console, real_title=game).first() - DB.session.delete(game) - DB.session.commit() - - -def getOthersVideos(library, allVideosPath=None): - if not allVideosPath: - allVideosPath = Libraries.query.filter_by(lib_name=library).first().lib_folder - try: - allVideos = os.listdir(allVideosPath) - except Exception: - return - 
else: - allVideos = os.listdir(f"{allVideosPath}") - - supportedVideoTypes = [ - ".mp4", - ".webm", - ".mkv", - ".avi", - ".mov", - ".wmv", - ".flv", - ".mpg", - ".mpeg", - ] - - allDirectories = [ - video for video in allVideos if os.path.isdir(f"{allVideosPath}/{video}") - ] - allVideos = [ - video - for video in allVideos - if os.path.splitext(video)[1] in supportedVideoTypes - ] - - for directory in allDirectories: - directoryPath = f"{allVideosPath}/{directory}" - getOthersVideos(library, directoryPath) - - for video in allVideos: - title, extension = os.path.splitext(video) - - printLoading(allVideos, video, title) - - slug = f"{allVideosPath}/{video}" - exists = OthersVideos.query.filter_by(slug=slug).first() is not None - if not exists: - with open(slug, "rb") as f: - video_hash = zlib.crc32(f.read()) - - # Conversion du hash en chaîne hexadécimale - video_hash_hex = hex(video_hash)[2:] - - # Récupération des 10 premiers caractères - video_hash = video_hash_hex[:10] - videoDuration = length_video(slug) - middle = videoDuration // 2 - banner = f"{IMAGES_PATH}/Other_Banner_{library}_{video_hash}.webp" - command = [ - "ffmpeg", - "-i", - slug, - "-vf", - f"select='eq(n,{middle})'", - "-vframes", - "1", - f"{banner}", - "-y", - ] - try: - subprocess.run( - command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL - ) - if os.path.getsize(f"{banner}") == 0: - generateImage(title, library, f"{banner}") - banner = f"{IMAGES_PATH}/Other_Banner_{library}_{video_hash}.webp" - except Exception: - banner = "/static/img/broken.webp" - video = OthersVideos( - video_hash=video_hash, - title=title, - slug=slug, - banner=banner, - duration=videoDuration, - library_name=library, - ) - DB.session.add(video) - DB.session.commit() - - for video in OthersVideos.query.filter_by(library_name=library).all(): - path = video.slug - if not os.path.exists(path): - DB.session.delete(video) - DB.session.commit() - - -def getMusics(library): - allMusicsPath = Libraries.query.filter_by(lib_name=library).first().lib_folder - allMusics = os.listdir(allMusicsPath) - - supportedMusicTypes = [".mp3", ".wav", ".ogg", ".flac"] - - allArtists = [ - music for music in allMusics if os.path.isdir(f"{allMusicsPath}/{music}") - ] - - for artist in allArtists: - filesAndDirs = os.listdir(f"{allMusicsPath}/{artist}") - allAlbums = [ - dire - for dire in filesAndDirs - if os.path.isdir(f"{allMusicsPath}/{artist}/{dire}") - ] - allFiles = [ - file - for file in filesAndDirs - if os.path.isfile(f"{allMusicsPath}/{artist}/{file}") - and os.path.splitext(file)[1] in supportedMusicTypes - ] - artist_id = createArtist(artist, library) - artistName = artist - albumsInDB = Albums.query.filter_by(artist_id=artist_id).all() - tracksInDB = Tracks.query.filter_by(artist_id=artist_id).all() - albumsInDB = len([album for album in albumsInDB]) - tracksInDB = len([track for track in tracksInDB]) - if albumsInDB == len(allAlbums) and tracksInDB == len(allFiles): - continue - - startPath = f"{allMusicsPath}/{artist}" - - for album in allAlbums: - albumGuessedData = guessit(album) - if "title" in albumGuessedData: - albumName = albumGuessedData["title"] - else: - albumName, extension = os.path.splitext(album) - - allTracks = os.listdir(f"{startPath}/{album}") - allTracks = [ - track - for track in allTracks - if os.path.splitext(track)[1] in supportedMusicTypes - ] - album_id = createAlbum(albumName, artist_id, allTracks, library) - - for track in allTracks: - slug = f"{startPath}/{album}/{track}" - - exists = 
Tracks.query.filter_by(slug=slug).first() is not None - if exists: - continue - - title, extension = os.path.splitext(track) - printLoading(allTracks, track, title) - - tags = TinyTag.get(slug, image=True) - - image = tags.get_image() - imagePath = f"{IMAGES_PATH}/Album_{album_id}.webp" - if image is not None: - if not os.path.exists(imagePath): - image = Image.open(io.BytesIO(image)) - image = image.save(imagePath, "webp") - elif not os.path.exists(imagePath): - print(f"L'album {album} n'a pas d'image") - getAlbumImage(album, imagePath) - - if tags.title is not None and tags.title != "" and tags.title != " ": - title = tags.title - else: - guessedData = guessit(title) - - title = "" - - if "title" in guessedData: - title = guessedData["title"] - if title.isdigit(): - title = guessedData["alternative_title"] - else: - if isinstance("episode", list) and "season" in guessedData: - title = f"{guessedData['season']}{' '.join(guessedData['episode'][1])}" - elif "episode" in guessedData and "season" in guessedData: - title = f"{guessedData['season']}{guessedData['episode']}" - - if "release_group" in guessedData: - title += f" ({guessedData['release_group']}" - - imagePath = imagePath.replace(dir_path, "") - - track = Tracks( - name=title, - slug=slug, - album_id=album_id, - artist_id=artist_id, - duration=tags.duration, - cover=imagePath, - library_name=library, - ) - DB.session.add(track) - DB.session.commit() - - for track in allFiles: - slug = f"{startPath}/{track}" - - exists = Tracks.query.filter_by(slug=slug).first() is not None - if exists: - continue - - title, extension = os.path.splitext(track) - printLoading(allFiles, track, title) - - tags = TinyTag.get(slug, image=True) - - image = tags.get_image() - imagePath = f"{IMAGES_PATH}/Album_{artist_id}.webp" - if image is not None: - if not os.path.exists(imagePath): - image = Image.open(io.BytesIO(image)) - image = image.save(imagePath, "webp") - elif not os.path.exists(imagePath): - getArtistImage(artistName, imagePath) - - if tags.title is not None and tags.title != "" and tags.title != " ": - title = tags.title - else: - guessedData = guessit(title) - - title = "" - - if "title" in guessedData: - title = guessedData["title"] - if title.isdigit(): - title = guessedData["alternative_title"] - else: - if isinstance("episode", list) and "season" in guessedData: - title = f"{guessedData['season']}{' '.join(guessedData['episode'][1])}" - elif "episode" in guessedData and "season" in guessedData: - title = f"{guessedData['season']}{guessedData['episode']}" - - if "release_group" in guessedData: - title += f" ({guessedData['release_group']}" - - imagePath = imagePath.replace(dir_path, "") - - track = Tracks( - name=title, - slug=slug, - album_id=0, - artist_id=artist_id, - duration=tags.duration, - cover=imagePath, - library_name=library, - ) - DB.session.add(track) - DB.session.commit() - - allTracks = Tracks.query.filter_by(library_name=library).all() - for track in allTracks: - path = track.slug - if not os.path.exists(path): - DB.session.delete(track) - DB.session.commit() - - allAlbums = Albums.query.filter_by(library_name=library).all() - for album in allAlbums: - tracks = album.tracks - if tracks == "": - DB.session.delete(album) - DB.session.commit() - continue - - allArtists = Artists.query.filter_by(library_name=library).all() - for artist in allArtists: - artist_id = artist.id - albums = Albums.query.filter_by(artist_id=artist_id).all() - tracks = Tracks.query.filter_by(artist_id=artist_id).all() - if len(albums) == 0 and 
len(tracks) == 0: - DB.session.delete(artist) - DB.session.commit() - continue - - -def getBooks(library): - allBooks = Libraries.query.filter_by(lib_name=library) - allBooksPath = allBooks.first().lib_folder - - allBooks = os.walk(allBooksPath) - books = [] - - for root, dirs, files in allBooks: - for file in files: - path = f"{root}/{file}".replace("\\", "/") - - if file.endswith((".pdf", ".epub", ".cbz", ".cbr")): - books.append(path) - - allBooks = books - - imageFunctions = { - ".pdf": getPDFCover, - ".epub": getEPUBCover, - ".cbz": getCBZCover, - ".cbr": getCBRCover, - } - - for book in allBooks: - name, extension = os.path.splitext(book) - name = name.split("/")[-1] - - printLoading(allBooks, book, name) - - slug = f"{book}" - - exists = Books.query.filter_by(slug=slug).first() is not None - if not exists and not os.path.isdir(slug): - if extension in imageFunctions.keys(): - book_cover, book_type = "temp", "temp" - book = Books( - title=name, - slug=slug, - book_type=book_type, - cover=book_cover, - library_name=library, - ) - DB.session.add(book) - DB.session.commit() - book_id = book.id - book_cover, book_type = imageFunctions[extension](slug, name, book_id) - book.cover = book_cover - book.book_type = book_type - DB.session.commit() - allBooksInDb = Books.query.filter_by(library_name=library).all() - for book in allBooksInDb: - if not os.path.exists(book.slug): - DB.session.delete(book) - DB.session.commit() - - -def getPDFCover(path, name, id): - pdfDoc = fitz.open(path) - # Récupérez la page demandée - page = pdfDoc[0] - # Créez une image à partir de la page - pix = page.get_pixmap() - # Enregistre l'image - img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) - if os.path.exists(f"{IMAGES_PATH}/Books_Banner_{id}.webp"): - os.remove(f"{IMAGES_PATH}/Books_Banner_{id}.webp") - - img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp") - path = f"{IMAGES_PATH}/Books_Banner_{id}.webp" - return path, "PDF" - - -def getEPUBCover(path, name, id): - pdfDoc = fitz.open(path) - # Récupérez la page demandée - page = pdfDoc[0] - # Créez une image à partir de la page - pix = page.get_pixmap() - # Enregistre l'image - img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) - - if os.path.exists(f"{IMAGES_PATH}/Books_Banner_{id}.webp"): - os.remove(f"{IMAGES_PATH}/Books_Banner_{id}.webp") - - img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp") - path = f"{IMAGES_PATH}/Books_Banner_{id}.webp" - - return path, "EPUB" - - -def getCBZCover(path, name, id): - try: - with zipfile.ZipFile(path, "r") as zip_ref: - # Parcourt tous les fichiers à l'intérieur du CBZ - for file in zip_ref.filelist: - # Vérifie si le fichier est une image - if file.filename.endswith(".jpg") or file.filename.endswith(".png"): - # Ouvre le fichier image - with zip_ref.open(file) as image_file: - img = Image.open(io.BytesIO(image_file.read())) - # Enregistre l'image - img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp") - break - elif file.filename.endswith("/"): - with zip_ref.open(file) as image_file: - for file in zip_ref.filelist: - if file.filename.endswith(".jpg") or file.filename.endswith( - ".png" - ): - # Ouvre le fichier image - with zip_ref.open(file) as image_file: - img = Image.open(io.BytesIO(image_file.read())) - # Enregistre l'image - img.save( - f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp" - ) - break - return f"{IMAGES_PATH}/Books_Banner_{id}.webp", "CBZ" - except Exception: - return getCBRCover(path, name, id) - - -def getCBRCover(path, name, id): - name = 
name.replace(" ", "_").replace("#", "") - try: - with rarfile.RarFile(path, "r") as rar_ref: - # Parcourt tous les fichiers à l'intérieur du CBR - for file in rar_ref.infolist(): - # Vérifie si le fichier est une image - if file.filename.endswith(".jpg") or file.filename.endswith(".png"): - # Ouvre le fichier image - with rar_ref.open(file) as image_file: - img = Image.open(io.BytesIO(image_file.read())) - # Enregistre l'image - img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp") - break - elif file.filename.endswith("/"): - with rar_ref.open(file) as image_file: - img = Image.open(io.BytesIO(image_file.read())) - # Enregistre l'image - img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp") - break - - return f"{IMAGES_PATH}/Books_Banner_{id}.webp", "CBR" - except rarfile.NotRarFile: - return getCBZCover(path, name, id) +import deezer +import requests +import os +import rarfile +import zipfile +import zlib +import ast +import datetime +import sqlalchemy +import re +import subprocess +import io +import uuid +import fitz + +from guessit import guessit +from Levenshtein import distance as lev +from tmdbv3api import TV, Episode, Movie, Person, Search, Group +from tmdbv3api.as_obj import AsObj +from tmdbv3api.exceptions import TMDbException +from PIL import Image +from tinytag import TinyTag +from deep_translator import GoogleTranslator + +from . import DB, get_dir_path, config, IMAGES_PATH +from .tables import ( + Libraries, + Movies, + Series, + Artists, + Albums, + Tracks, + Episodes, + Seasons, + Actors, + Games, + OthersVideos, + Books, +) + + +from .utils.utils import path_join + +dir_path = get_dir_path() + +deezer = deezer.Client() + +image_requests = requests.Session() + +genre_list = { + 12: "Aventure", + 14: "Fantastique", + 16: "Animation", + 18: "Drama", + 27: "Horreur", + 28: "Action", + 35: "Comédie", + 36: "Histoire", + 37: "Western", + 53: "Thriller", + 80: "Crime", + 99: "Documentaire", + 878: "Science-fiction", + 9648: "Mystère", + 10402: "Musique", + 10749: "Romance", + 10751: "Famille", + 10752: "War", + 10759: "Action & Adventure", + 10762: "Kids", + 10763: "News", + 10764: "Reality", + 10765: "Sci-Fi & Fantasy", + 10766: "Soap", + 10767: "Talk", + 10768: "War & Politics", + 10769: "Western", + 10770: "TV Movie", +} + +websites_trailers = { + "YouTube": "https://www.youtube.com/embed/", + "Dailymotion": "https://www.dailymotion.com/video/", + "Vimeo": "https://vimeo.com/", +} + + +def transformToDict(obj): + if isinstance(obj, list): + return obj + if isinstance(obj, AsObj): + obj = str(obj) + obj = ast.literal_eval(obj) + return obj + return obj + + +def transformToList(obj): + if isinstance(obj, AsObj): + return list(obj) + if isinstance(obj, list): + return obj + return obj.replace('"', '\\"') + + +def length_video(path: str) -> float: + seconds = subprocess.run( + [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + path, + ], + stdout=subprocess.PIPE, + text=True, + ) + try: + return float(seconds.stdout) + except Exception: + return 0 + + +def createArtist(artistName, lib): + exists = Artists.query.filter_by(name=artistName).first() is not None + if exists: + return Artists.query.filter_by(name=artistName).first().id + + artists = deezer.search_artists(artistName) + artist = artists[0] + artist_id = artist.id + + exists = Artists.query.filter_by(id=artist_id).first() is not None + if exists: + return artist_id + + cover = artist.picture_big + + path = 
f"{IMAGES_PATH}/Artist_{artist_id}.png" + # Récupération de l'image, conversion en webp et sauvegarde dans le dossier static au nom "Artist_{artist_id}.webp" + with open(path, "wb") as f: + f.write(image_requests.get(cover).content) + + try: + img = Image.open(path) + img.save(f"{IMAGES_PATH}/Artist_{artist_id}.webp", "webp") + os.remove(f"{IMAGES_PATH}/Artist_{artist_id}.png") + path = f"{IMAGES_PATH}/Artist_{artist_id}.webp" + img.close() + except Exception: + pass + + path = path.replace(dir_path, "") + + artist = Artists(id=artist_id, name=artistName, cover=path, library_name=lib) + DB.session.add(artist) + DB.session.commit() + + return artist_id + + +def createAlbum(name, artist_id, tracks=[], library=""): + exists = ( + Albums.query.filter_by(dir_name=name, artist_id=artist_id).first() is not None + ) + if exists: + Albums.query.filter_by( + dir_name=name, artist_id=artist_id + ).first().tracks = ",".join(tracks) + DB.session.commit() + return Albums.query.filter_by(dir_name=name, artist_id=artist_id).first().id + + albums = deezer.search_albums( + f"{Artists.query.filter_by(id=artist_id).first().name} - {name}" + ) + + # pour chaque album trouvé, on vérifie si le nom de est proche du nom de l'album qu'on cherche + if len(albums) == 0: + return None + best_match = albums[0] + + for album in albums: + if lev(name, album.title) < lev(name, best_match.title): + best_match = album + elif lev(name, album.title) == lev(name, best_match.title): + best_match = best_match + if lev(name, best_match.title) == 0: + break + + album = best_match + + album_id = album.id + exist = Albums.query.filter_by(id=album_id, artist_id=artist_id).first() is not None + if exist: + return album_id + album_name = album.title + cover = album.cover_big + + path = f"{IMAGES_PATH}/Album_{album_id}.png" + + with open(path, "wb") as f: + f.write(image_requests.get(cover).content) + + try: + img = Image.open(path) + img.save(f"{IMAGES_PATH}/Album_{album_id}.webp", "webp") + os.remove(path) + path = f"{IMAGES_PATH}/Album_{album_id}.webp" + img.close() + except Exception: + pass + + path = path.replace(dir_path, "") + + tracks = ",".join(tracks) + + album = Albums( + id=album_id, + name=album_name, + dir_name=name, + artist_id=artist_id, + cover=path, + tracks=tracks, + library_name=library, + ) + DB.session.add(album) + DB.session.commit() + + return album_id + + +def getAlbumImage(album_name, path): + albums = deezer.search_albums(album_name) + album = albums[0] + album_name = album.title + cover = album.cover_big + + # Récupération de l'image, conversion en webp et sauvegarde dans le dossier static au nom "Album_{album_id}.webp" + with open(path, "wb") as f: + f.write(image_requests.get(cover).content) + + try: + img = Image.open(path) + img.save(path, "webp") + img.close() + except Exception: + pass + + return path + + +def getArtistImage(artist_name, path): + artist = deezer.search_artists(artist_name)[0] + artist_name = artist.name + cover = artist.picture_big + + with open(path, "wb") as f: + f.write(image_requests.get(cover).content) + + try: + img = Image.open(path) + img.save(path, "webp") + img.close() + except Exception: + pass + + return path + + +def generateImage(title, librairie, banner): + from PIL import Image, ImageDraw, ImageFont + + largeur = 1280 + hauteur = 720 + image = Image.new("RGB", (largeur, hauteur), color="#1d1d1d") + + # Ajouter les textes au centre de l'image + draw = ImageDraw.Draw(image) + + # Charger la police Poppins + font_path = f"{dir_path}/static/fonts/Poppins-Medium.ttf" + 
+def generateImage(title, librairie, banner):
+    from PIL import Image, ImageDraw, ImageFont
+
+    largeur = 1280
+    hauteur = 720
+    image = Image.new("RGB", (largeur, hauteur), color="#1d1d1d")
+
+    # Add the texts at the center of the image
+    draw = ImageDraw.Draw(image)
+
+    # Load the Poppins font
+    font_path = f"{dir_path}/static/fonts/Poppins-Medium.ttf"
+    font_title = ImageFont.truetype(font_path, size=70)
+    font_librairie = ImageFont.truetype(font_path, size=50)
+
+    # Center the texts in the image
+    titre_larg, titre_haut = draw.textsize(title, font=font_title)
+    librairie_larg, librairie_haut = draw.textsize(librairie, font=font_librairie)
+    x_title = int((largeur - titre_larg) / 2)
+    y_title = int((hauteur - titre_haut - librairie_haut - 50) / 2)
+    x_librairie = int((largeur - librairie_larg) / 2)
+    y_librairie = y_title + titre_haut + 50
+
+    # Draw the title text
+    draw.text((x_title, y_title), title, font=font_title, fill="white", align="center")
+
+    # Draw the library text
+    draw.text(
+        (x_librairie, y_librairie),
+        librairie,
+        font=font_librairie,
+        fill="white",
+        align="center",
+    )
+
+    # Save the image
+    os.remove(banner)
+    image.save(banner, "webp")
+
+
+def is_connected():
+    try:
+        requests.get("https://www.google.com/").status_code
+        return True
+    except Exception:
+        return False
+
+
+def printLoading(filesList, actualFile, title):
+    terminal_size = os.get_terminal_size().columns - 1
+    try:
+        index = filesList.index(actualFile) + 1
+    except Exception:
+        index = 0
+    percentage = index * 100 / len(filesList)
+
+    loading_first_part = ("•" * int(percentage * 0.2))[:-1]
+    loading_first_part = f"{loading_first_part}➤"
+    loading_second_part = "•" * (20 - int(percentage * 0.2))
+
+    loading = f"{str(int(percentage)).rjust(3)}% | [\33[32m{loading_first_part}\33[31m{loading_second_part}\33[0m] | {title} | {index}/{len(filesList)}"
+    loading2 = loading + " " * (terminal_size - len(loading))
+
+    if len(loading2) > terminal_size:
+        loading2 = loading2[: terminal_size - 3] + "..."
+
+    print("\033[?25l", end="")
+    print(loading2, end="\r", flush=True)
+
+
+def searchGame(game, console):
+    url = f"https://www.igdb.com/search_autocomplete_all?q={game.replace(' ', '%20')}"
+    return IGDBRequest(url, console)
+
+
+def translate(string):
+    language = config["ChocolateSettings"]["language"]
+    if language == "EN":
+        return string
+    translated = GoogleTranslator(source="english", target=language.lower()).translate(
+        string
+    )
+    return translated
+
+
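IGDBRequest below first scrapes IGDB's autocomplete endpoint, then authenticates against the IGDB v4 API with a Twitch client-credentials token before fetching game details. A minimal sketch of just that token step, under the same IGDBID/IGDBSECRET config keys this module reads (the helper name is illustrative, not part of the patch):

    import requests

    def get_igdb_token(client_id, client_secret):
        # Twitch issues the OAuth bearer token that the IGDB v4 API expects.
        response = requests.post(
            "https://id.twitch.tv/oauth2/token",
            params={
                "client_id": client_id,
                "client_secret": client_secret,
                "grant_type": "client_credentials",
            },
        )
        return response.json().get("access_token")  # None if the credentials were rejected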
f"fields name, cover.*, summary, total_rating, first_release_date, genres.*, platforms.*; where id = {game_id};" + response = requests.request("POST", url, headers=headers, data=body) + if len(response.json()) == 0: + break + game = response.json()[0] + if "platforms" in game: + game_platforms = game["platforms"] + try: + platforms = [] + + for i in game_platforms: + if "abbreviation" not in i: + platforms.append(i["alternative_name"]) + else: + platforms.append(i["abbreviation"]) + + real_console_name = { + "GB": "Game Boy", + "GBA": "Game Boy Advance", + "GBC": "Game Boy Color", + "N64": "Nintendo 64", + "NES": "Nintendo Entertainment System", + "NDS": "Nintendo DS", + "SNES": "Super Nintendo Entertainment System", + "Sega Master System": "Sega Master System", + "Sega Mega Drive": "Sega Mega Drive", + "PS1": "PS1", + } + + if ( + real_console_name[console] not in platforms + and console not in platforms + ): + continue + if "total_rating" not in game: + game["total_rating"] = "Unknown" + if "genres" not in game: + game["genres"] = [{"name": "Unknown"}] + if "summary" not in game: + game["summary"] = "Unknown" + if "first_release_date" not in game: + game["first_release_date"] = "Unknown" + if "cover" not in game: + game["cover"] = { + "url": "//images.igdb.com/igdb/image/upload/t_cover_big/nocover.png" + } + + game["summary"] = translate(game["summary"]) + game["genres"][0]["name"] = translate(game["genres"][0]["name"]) + + genres = [] + for genre in game["genres"]: + genres.append(genre["name"]) + genres = ", ".join(genres) + + game_data = { + "title": game["name"], + "cover": game["cover"]["url"].replace("//", "https://"), + "description": game["summary"], + "note": game["total_rating"], + "date": game["first_release_date"], + "genre": genres, + "id": game["id"], + } + return game_data + except Exception: + continue + return None + + +def getMovies(library_name): + all_movies_not_sorted = [] + path = Libraries.query.filter_by(lib_name=library_name).first().lib_folder + film_file_list = [] + try: + movie_files = os.listdir(path) + except Exception: + return + for movie_file in movie_files: + if not movie_file.endswith((".rar", ".zip", ".part")): + film_file_list.append(movie_file) + + if not is_connected(): + return + + film_file_list.sort() + movie = Movie() + + for searchedFilm in film_file_list: + movieTitle = searchedFilm + if os.path.isdir(path_join(path, searchedFilm)): + the_path = path_join(path, searchedFilm) + searchedFilm = path_join(searchedFilm, os.listdir(the_path)[0]) + else: + movieTitle, extension = os.path.splitext(movieTitle) + originalMovieTitle = movieTitle + + printLoading(film_file_list, searchedFilm, movieTitle) + + slug = searchedFilm + video_path = f"{path}/{slug}" + exists = Movies.query.filter_by(slug=video_path).first() is not None + + if not exists: + guessedData = guessit(originalMovieTitle) + guessedTitle = "" + year = None + if "title" not in guessedData: + guessedTitle = originalMovieTitle + else: + guessedTitle = guessedData["title"] + if "episode" in guessedData: + guessedTitle = f"{guessedData['episode']} {guessedTitle}" + if "alternative_title" in guessedData: + guessedTitle = ( + f"{guessedData['alternative_title']} - {guessedTitle}" + ) + if "part" in guessedData: + guessedTitle = f"{guessedTitle} Part {guessedData['part']}" + if "year" in guessedData: + year = guessedData["year"] + + try: + search = Search().movies(guessedTitle, year=year, adult=True) + except Exception: + search = Search().movies(guessedTitle, year=year) + + search = 
transformToDict(search) + if not search or not search["results"]: + all_movies_not_sorted.append(originalMovieTitle) + continue + + search = search["results"] + bestMatch = search[0] + if ( + config["ChocolateSettings"]["askwhichmovie"] == "false" + or len(search) == 1 + ): + for i in range(len(search)): + if ( + lev(guessedTitle, search[i]["title"]) + < lev(guessedTitle, bestMatch["title"]) + and bestMatch["title"] not in film_file_list + ): + bestMatch = search[i] + elif ( + lev(guessedTitle, search[i]["title"]) + == lev(guessedTitle, bestMatch["title"]) + and bestMatch["title"] not in film_file_list + ): + bestMatch = bestMatch + if ( + lev(guessedTitle, bestMatch["title"]) == 0 + and bestMatch["title"] not in film_file_list + ): + break + + res = bestMatch + try: + name = res["title"] + except AttributeError: + name = res["original_title"] + movie_id = res["id"] + details = movie.details(movie_id) + + movieCoverPath = f"https://image.tmdb.org/t/p/original{res['poster_path']}" + banner = f"https://image.tmdb.org/t/p/original{res['backdrop_path']}" + real_title, extension = os.path.splitext(originalMovieTitle) + + with open(f"{IMAGES_PATH}/{movie_id}_Cover.png", "wb") as f: + f.write(image_requests.get(movieCoverPath).content) + try: + img = Image.open(f"{IMAGES_PATH}/{movie_id}_Cover.png") + img.save(f"{IMAGES_PATH}/{movie_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{movie_id}_Cover.png") + movieCoverPath = f"{IMAGES_PATH}/{movie_id}_Cover.webp" + img.close() + except Exception: + try: + os.rename( + f"{IMAGES_PATH}/{movie_id}_Cover.png", + f"{IMAGES_PATH}/{movie_id}_Cover.webp", + ) + movieCoverPath = "/static/img/broken.webp" + except Exception: + os.remove(f"{IMAGES_PATH}/{movie_id}_Cover.webp") + os.rename( + f"{IMAGES_PATH}/{movie_id}_Cover.png", + f"{IMAGES_PATH}/{movie_id}_Cover.webp", + ) + movieCoverPath = f"{IMAGES_PATH}/{movie_id}_Cover.webp" + with open(f"{IMAGES_PATH}/{movie_id}_Banner.png", "wb") as f: + f.write(image_requests.get(banner).content) + if not res["backdrop_path"]: + banner = f"https://image.tmdb.org/t/p/original{details.backdrop_path}" + if banner != "https://image.tmdb.org/t/p/originalNone": + with open(f"{IMAGES_PATH}/{movie_id}_Banner.png", "wb") as f: + f.write(image_requests.get(banner).content) + else: + banner = "/static/img/broken.webp" + try: + img = Image.open(f"{IMAGES_PATH}/{movie_id}_Banner.png") + img.save(f"{IMAGES_PATH}/{movie_id}_Banner.webp", "webp") + os.remove(f"{IMAGES_PATH}/{movie_id}_Banner.png") + banner = f"{IMAGES_PATH}/{movie_id}_Banner.webp" + img.close() + except Exception: + banner = "/static/img/brokenBanner.webp" + + description = res["overview"] + note = res["vote_average"] + try: + date = res["release_date"] + except AttributeError: + date = "Unknown" + casts = list(details.casts.cast)[:5] + theCast = [] + for cast in casts: + + actor_id = cast.id + actorImage = f"https://www.themovieDB.org/t/p/w600_and_h900_bestv2{cast.profile_path}" + if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(image_requests.get(actorImage).content) + try: + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img.close() + except Exception: + os.rename( + f"{IMAGES_PATH}/Actor_{actor_id}.png", + f"{IMAGES_PATH}/Actor_{actor_id}.webp", + ) + + actorImage = f"{IMAGES_PATH}/Actor_{actor_id}.webp" + if actor_id not in theCast: + 
theCast.append(actor_id) + else: + break + person = Person() + p = person.details(cast.id) + exists = Actors.query.filter_by(actor_id=cast.id).first() is not None + if not exists: + actor = Actors( + name=cast.name, + actor_image=actorImage, + actor_description=p.biography, + actor_birth_date=p.birthday, + actor_birth_place=p.place_of_birth, + actor_programs=f"{movie_id}", + actor_id=cast.id, + ) + DB.session.add(actor) + DB.session.commit() + else: + actor = Actors.query.filter_by(actor_id=cast.id).first() + actor.actor_programs = f"{actor.actor_programs} {movie_id}" + DB.session.commit() + theCast = ",".join([str(i) for i in theCast]) + try: + date = datetime.datetime.strptime(date, "%Y-%m-%d").strftime("%d/%m/%Y") + except ValueError: + date = "Unknown" + except UnboundLocalError: + date = "Unknown" + + genre = res["genre_ids"] + try: + length = length_video(video_path) + length = str(datetime.timedelta(seconds=length)) + length = length.split(":") + except Exception: + length = [] + + if len(length) == 3: + hours = length[0] + minutes = length[1] + seconds = str(round(float(length[2]))) + if int(seconds) < 10: + seconds = f"0{seconds}" + length = f"{hours}:{minutes}:{seconds}" + elif len(length) == 2: + minutes = length[0] + seconds = str(round(float(length[1]))) + if int(seconds) < 10: + seconds = f"0{seconds}" + length = f"{minutes}:{seconds}" + elif len(length) == 1: + seconds = str(round(float(length[0]))) + if int(seconds) < 10: + seconds = f"0{seconds}" + length = f"00:{seconds}" + else: + length = "0" + + duration = length + + movieGenre = [] + for genre_id in genre: + movieGenre.append(genre_list[genre_id]) + movieGenre = ",".join(movieGenre) + + bandeAnnonce = details.videos.results + bande_annonce_url = "" + if len(bandeAnnonce) > 0: + for video in bandeAnnonce: + bandeAnnonceType = video.type + bandeAnnonceHost = video.site + bandeAnnonceKey = video.key + if bandeAnnonceType == "Trailer": + try: + bande_annonce_url = ( + websites_trailers[bandeAnnonceHost] + bandeAnnonceKey + ) + break + except KeyError: + bande_annonce_url = "Unknown" + + alternatives_names = [] + actualTitle = movieTitle + characters = [" ", "-", "_", ":", ".", ",", "!", "'", "`", '"'] + empty = "" + for character in characters: + for character2 in characters: + if character != character2: + stringTest = actualTitle.replace(character, character2) + alternatives_names.append(stringTest) + stringTest = actualTitle.replace(character2, character) + alternatives_names.append(stringTest) + stringTest = actualTitle.replace(character, empty) + alternatives_names.append(stringTest) + stringTest = actualTitle.replace(character2, empty) + alternatives_names.append(stringTest) + + officialAlternativeNames = movie.alternative_titles( + movie_id=movie_id + ).titles + if officialAlternativeNames is not None: + for officialAlternativeName in officialAlternativeNames: + alternatives_names.append(officialAlternativeName.title) + + alternatives_names = list(dict.fromkeys(alternatives_names)) + + alternatives_names = ",".join(alternatives_names) + filmData = Movies( + id=movie_id, + title=movieTitle, + real_title=name, + cover=movieCoverPath, + banner=banner, + slug=video_path, + description=description, + note=note, + date=date, + genre=movieGenre, + duration=str(duration), + cast=theCast, + bande_annonce_url=bande_annonce_url, + adult=str(res["adult"]), + library_name=library_name, + alternatives_names=alternatives_names, + file_date=os.path.getmtime(video_path), + ) + DB.session.add(filmData) + DB.session.commit() + + 
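# Remove movies from the database when their file has disappeared from the library folder + 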
movie_files = Movies.query.filter_by(library_name=library_name).all() + for movie in movie_files: + slug = movie.slug + if not os.path.exists(slug): + DB.session.delete(movie) + DB.session.commit() + + +def getSeries(library_name): + allSeriesPath = Libraries.query.filter_by(lib_name=library_name).first().lib_folder + allSeries = os.listdir(allSeriesPath) + allSeriesName = [] + for dir in allSeries: + if os.path.isdir(f"{allSeriesPath}/{dir}"): + allSeriesName.append(f"{allSeriesPath}/{dir}") + + if not is_connected(): + return + + show = TV() + + for serie in allSeriesName: + if not isinstance(serie, str): + continue + + printLoading(allSeriesName, serie, serie) + + seriePath = serie + serieTitle = serie.split("/")[-1] + originalSerieTitle = serieTitle + try: + serie_modified_time = os.path.getmtime(seriePath) + except FileNotFoundError: + print(f"Cant find {originalSerieTitle}") + continue + + serie_guess = guessit(originalSerieTitle) + if "title" in serie_guess: + serieTitle = serie_guess["title"] + + if "alternative_title" in serie_guess: + serieTitle = f"{serieTitle} - {serie_guess['alternative_title']}" + + try: + if "year" in serie_guess: + search = Search().tv_shows(serieTitle, release_year=serie_guess["year"]) + else: + search = Search().tv_shows(serieTitle) + except TMDbException: + break + + search = search.results + search = transformToDict(search) + + if search == {}: + continue + + askForGoodSerie = config["ChocolateSettings"]["askWhichSerie"] + bestMatch = search[0] + if askForGoodSerie == "false" or len(search) == 1: + for i in range(len(search)): + if ( + lev(serieTitle, search[i]["name"]) + < lev(serieTitle, bestMatch["name"]) + and bestMatch["name"] not in allSeriesName + ): + bestMatch = search[i] + elif ( + lev(serieTitle, search[i]["name"]) + == lev(serieTitle, bestMatch["name"]) + and bestMatch["name"] not in allSeriesName + ): + bestMatch = bestMatch + if ( + lev(serieTitle, bestMatch["name"]) == 0 + and bestMatch["name"] not in allSeriesName + ): + break + + res = bestMatch + serie_id = str(res["id"]) + + if ( + DB.session.query(Series).filter_by(original_name=serieTitle).first() + is not None + ): + serie_id = ( + DB.session.query(Series).filter_by(original_name=serieTitle).first().id + ) + + exists = DB.session.query(Series).filter_by(id=serie_id).first() is not None + + details = show.details(serie_id) + defaultNbOfSeasons = details.number_of_seasons + defaultNbOfEpisodes = details.number_of_episodes + seasonsInfo = details.seasons + + seasonsNumber = [] + seasons = os.listdir(seriePath) + for season in seasons: + if os.path.isdir(f"{seriePath}/{season}") and season != "": + season = re.sub(r"\D", "", season) + if season == "": + continue + seasonsNumber.append(int(season)) + + episodes = [] + for season in seasons: + allEpisodes = os.listdir(f"{seriePath}/{season}") + for episode in allEpisodes: + if os.path.isfile( + f"{seriePath}/{season}/{episode}" + ): + episodes.append(episode) + + nbEpisodes = len(episodes) + nbSeasons = len(seasons) + + episodeGroups = show.episode_groups(serie_id).results + # print(f"Pour {serie_name} : nbEpisodes: {nbEpisodes} nbSeasons: {nbSeasons} defaultNbOfEpisodes: {defaultNbOfEpisodes} defaultNbOfSeasons: {defaultNbOfSeasons}") + + if nbEpisodes <= defaultNbOfEpisodes and nbSeasons <= defaultNbOfSeasons: + pass + elif len(episodeGroups) > 0: + seasonsInfo = None + for group in episodeGroups: + groupNbEpisodes = group.episode_count + groupNbSeasons = group.group_count + + if nbEpisodes >= groupNbEpisodes * 0.95 and nbSeasons == 
groupNbSeasons: + theGroup = Group() + seasonsInfo = theGroup.details(group.id).groups + for season in seasonsInfo: + season = season.__dict__ + if len(season["episodes"]) > 0: + season["season_number"] = season["order"] + season["episode_count"] = len(season["episodes"]) + print(len(season["episodes"])) + season["air_date"] = season["episodes"][0]["air_date"] + season["overview"] = "" + season["poster_path"] = season["episodes"][0]["still_path"] + if seasonsInfo is None: + for group in episodeGroups: + if nbEpisodes <= groupNbEpisodes and nbSeasons <= groupNbSeasons: + groupNbEpisodes = group.episode_count + groupNbSeasons = group.group_count + + if ( + nbEpisodes == groupNbEpisodes + and nbSeasons == groupNbSeasons + ): + theGroup = Group() + seasonsInfo = theGroup.details(group.id).groups + for season in seasonsInfo: + season["season_number"] = season["order"] + season["episode_count"] = len(season["episodes"]) + season["air_date"] = season["episodes"][0]["air_date"] + season["overview"] = "" + season["poster_path"] = season["episodes"][0][ + "still_path" + ] + break + + if seasonsInfo is None: + group = episodeGroups[0] + theGroup = Group() + seasonsInfo = theGroup.details(group.id).groups + for season in seasonsInfo: + season["season_number"] = season["order"] + season["episode_count"] = len(season["episodes"]) + season["air_date"] = season["episodes"][0]["air_date"] + season["overview"] = "" + season["poster_path"] = season["episodes"][0]["still_path"] + + name = res["name"] + if not exists: + cover = f"https://image.tmdb.org/t/p/original{res['poster_path']}" + banner = f"https://image.tmdb.org/t/p/original{res['backdrop_path']}" + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.png"): + with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: + f.write(image_requests.get(cover).content) + try: + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img.close() + except Exception: + + pass + + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): + with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: + f.write(image_requests.get(banner).content) + try: + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") + os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img.close() + except Exception: + + pass + + banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" + cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" + description = res["overview"] + note = res["vote_average"] + date = res["first_air_date"] + cast = details.credits.cast + runTime = details.episode_run_time + duration = "" + for i in range(len(runTime)): + if i != len(runTime) - 1: + duration += f"{str(runTime[i])}:" + else: + duration += f"{str(runTime[i])}" + serieGenre = details.genres + bandeAnnonce = details.videos.results + bande_annonce_url = "" + if len(bandeAnnonce) > 0: + for video in bandeAnnonce: + bandeAnnonceType = video.type + bandeAnnonceHost = video.site + bandeAnnonceKey = video.key + if bandeAnnonceType == "Trailer" or len(bandeAnnonce) == 1: + try: + bande_annonce_url = ( + websites_trailers[bandeAnnonceHost] + bandeAnnonceKey + ) + break + except KeyError: + bande_annonce_url = "Unknown" + + genreList = [] + for genre in serieGenre: + genreList.append(str(genre.name)) + genreList = ",".join(genreList) + newCast = [] + cast = list(cast)[:5] + for actor in cast: + actor_id = actor.id + actorImage = 
f"https://image.tmdb.org/t/p/original{actor.profile_path}" + image = f"{IMAGES_PATH}/Actor_{actor_id}.png" + if not os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): + try: + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(image_requests.get(actorImage).content) + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + image = f"{IMAGES_PATH}/Actor_{actor_id}.webp" + img.close() + except Exception: + + pass + + actorImage = image + + actor.profile_path = str(actorImage) + newCast.append(actor_id) + + person = Person() + p = person.details(actor.id) + exists = Actors.query.filter_by(actor_id=actor.id).first() is not None + if not exists: + actor = Actors( + name=actor.name, + actor_id=actor.id, + actor_image=actorImage, + actor_description=p.biography, + actor_birth_date=p.birthday, + actor_birth_place=p.place_of_birth, + actor_programs=f"{serie_id}", + ) + DB.session.add(actor) + DB.session.commit() + else: + actor = Actors.query.filter_by(actor_id=actor.id).first() + if serie_id not in actor.actor_programs: + actor.actor_programs = f"{actor.actor_programs} {serie_id}" + DB.session.commit() + + newCast = newCast[:5] + newCast = ",".join([str(i) for i in newCast]) + isAdult = str(details["adult"]) + serieObject = Series( + id=serie_id, + name=name, + original_name=originalSerieTitle, + genre=genreList, + duration=duration, + description=description, + cast=newCast, + bande_annonce_url=bande_annonce_url, + cover=cover, + banner=banner, + note=note, + date=date, + serie_modified_time=serie_modified_time, + adult=isAdult, + library_name=library_name, + ) + DB.session.add(serieObject) + DB.session.commit() + + for season in seasonsInfo: + season = transformToDict(season) + allSeasons = os.listdir(seriePath) + url = None + for season_dir in allSeasons: + season_dir_number = re.sub(r"\D", "", season_dir) + if season_dir_number != "" and int(season_dir_number) == int(season["season_number"]): + url = f"{seriePath}/{season_dir}" + break + if not url: + #print(f"\nCan't find {serieTitle} season {season['season_number']}") + continue + season_dir = url + #print(f"\nSeason {season['season_number']} of {serieTitle} found: {season_dir}") + seasonInDB = Seasons.query.filter_by(season_id=season["id"]).first() + if seasonInDB: + modified_date = seasonInDB.modified_date + try: + actualSeasonModifiedTime = os.path.getmtime(url) + except FileNotFoundError: + continue + if seasonInDB is None or modified_date != actualSeasonModifiedTime: + try: + allEpisodes = [ + f + for f in os.listdir(season_dir) + if os.path.isfile(path_join(season_dir, f)) + ] + except FileNotFoundError: + continue + if seasonInDB: + seasonInDB.modified_date = modified_date + DB.session.commit() + bigSeason = season + releaseDate = season["air_date"] + episodes_number = season["episode_count"] + season_number = season["season_number"] + season_id = season["id"] + season_name = season["name"] + season_description = season["overview"] + seasonPoster = season["poster_path"] + + try: + seasonModifiedTime = os.path.getmtime(season_dir) + savedModifiedTime = ( + Seasons.query.filter_by(season_id=season_id) + .first() + .seasonModifiedTime + ) + except AttributeError: + seasonModifiedTime = os.path.getmtime(season_dir) + + if len(allEpisodes) > 0 or (seasonModifiedTime != savedModifiedTime): + try: + exists = ( + Seasons.query.filter_by(season_id=season_id).first() + is not None + ) + except 
sqlalchemy.exc.PendingRollbackError: + DB.session.rollback() + exists = ( + Seasons.query.filter_by(season_id=season_id).first() + is not None + ) + # number of episodes in the season + savedModifiedTime = 0 + if not exists or (seasonModifiedTime != savedModifiedTime): + season_cover_path = ( + f"https://image.tmdb.org/t/p/original{seasonPoster}" + ) + if not os.path.exists(f"{IMAGES_PATH}/{season_id}_Cover.png"): + try: + with open( + f"{IMAGES_PATH}/{season_id}_Cover.png", "wb" + ) as f: + f.write(image_requests.get(season_cover_path).content) + img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") + img.save( + f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" + ) + os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") + season_cover_path = ( + f"{IMAGES_PATH}/{season_id}_Cover.webp" + ) + img.close() + except Exception: + try: + with open( + f"{IMAGES_PATH}/{season_id}_Cover.png", "wb" + ) as f: + f.write(image_requests.get(season_cover_path).content) + img = Image.open( + f"{IMAGES_PATH}/{season_id}_Cover.png" + ) + img.save( + f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" + ) + os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") + season_cover_path = ( + f"{IMAGES_PATH}/{season_id}_Cover.webp" + ) + img.close() + except Exception: + season_cover_path = "/static/img/brokenImage.png" + + allSeasons = os.listdir(seriePath) + + try: + modified_date = os.path.getmtime(season_dir) + except FileNotFoundError: + modified_date = 0 + + allEpisodesInDB = Episodes.query.filter_by( + season_id=season_id + ).all() + allEpisodesInDB = [ + episode.episode_name for episode in allEpisodesInDB + ] + + exists = ( + Seasons.query.filter_by(season_id=season_id).first() is not None + ) + if not exists: + thisSeason = Seasons( + serie=serie_id, + release=releaseDate, + episodes_number=episodes_number, + season_number=season_number, + season_id=season_id, + season_name=season_name, + season_description=season_description, + cover=season_cover_path, + modified_date=modified_date, + number_of_episode_in_folder=len(allEpisodes), + ) + + try: + DB.session.add(thisSeason) + DB.session.commit() + except sqlalchemy.exc.PendingRollbackError: + DB.session.rollback() + DB.session.add(thisSeason) + DB.session.commit() + if len(allEpisodes) != len(allEpisodesInDB): + for episode in allEpisodes: + slug = f"{season_dir}/{episode}" + episodeName = slug.split("/")[-1] + guess = guessit(episodeName) + if "episode" in guess: + episodeIndex = guess["episode"] + elif "episode_title" in guess: + episodeIndex = guess["episode_title"] + elif "season" in guess and len(guess["season"]) == 2: + episodeIndex = guess["season"][1] + elif "season" in guess: + episodeIndex = guess["season"] + elif "title" in guess: + episodeIndex = guess["title"] + + else: + print( + f"Can't find the episode index of {episodeName}, data: {guess}, slug: {slug}" + ) + continue + + if isinstance(episodeIndex, list): + for i in range(len(episodeIndex)): + if isinstance(episodeIndex[i], int): + print(f"Episode index is {episodeIndex}") + episodeIndex[i] = str(episodeIndex[i]) + break + episodeIndex = "".join(episodeIndex) + + exists = Episodes.query.filter_by(episode_number=int(episodeIndex), season_id=season_id).first() is not None + + if not exists: + #print(f"Episode {episodeIndex} of {serieTitle} for the Season {season_id} not found") + if isinstance(season_id, int) or season_id.isnumeric(): + showEpisode = Episode() + #print(f"Get episodeInfo of : E{episodeIndex} S{season_number} of {serieTitle}") + try: + episodeDetails = showEpisode.details( + serie_id, 
season_number, episodeIndex + ) + except TMDbException: + #episode does not exist + continue + realEpisodeName = episodeDetails.name + episodeInfo = showEpisode.details( + serie_id, season_number, episodeIndex + ) + episode_id = episodeInfo["id"] + else: + print(f"Get episodeInfo of : E{episodeIndex} S{season_number} of {serieTitle}") + episodeInfo = bigSeason["episodes"][ + int(episodeIndex) - 1 + ] + episode_id = episodeInfo["id"] + realEpisodeName = episodeInfo["name"] + + coverEpisode = f"https://image.tmdb.org/t/p/original{episodeInfo['still_path']}" + + if not os.path.exists( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" + ): + with open( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png", + "wb", + ) as f: + f.write(image_requests.get(coverEpisode).content) + try: + img = Image.open( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" + ) + img.save( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp", + "webp", + ) + os.remove( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" + ) + img.close() + coverEpisode = f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" + except Exception: + coverEpisode = f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" + try: + exists = ( + Episodes.query.filter_by( + episode_id=episode_id + ).first() + is not None + ) + except sqlalchemy.exc.PendingRollbackError: + DB.session.rollback() + exists = ( + Episodes.query.filter_by( + episode_id=episode_id + ).first() + is not None + ) + if not exists: + episodeData = Episodes( + episode_id=episode_id, + episode_name=realEpisodeName, + season_id=season_id, + episode_number=episodeIndex, + episode_description=episodeInfo["overview"], + episode_cover_path=coverEpisode, + release_date=episodeInfo["air_date"], + slug=slug, + intro_start=0.0, + intro_end=0.0, + ) + thisSeason = Seasons.query.filter_by( + season_id=season_id + ).first() + thisSeason.number_of_episode_in_folder += 1 + try: + DB.session.add(episodeData) + DB.session.commit() + except Exception: + DB.session.rollback() + DB.session.add(episodeData) + DB.session.commit() + else: + pass + + allFiles = [ + name + for name in os.listdir(allSeriesPath) + if os.path.isfile(path_join(allSeriesPath, name)) + and not name.endswith((".rar", ".zip", ".part")) + ] + for file in allFiles: + printLoading(allFiles, file, file) + + slug = path_join(allSeriesPath, file) + exists = Episodes.query.filter_by(slug=slug).first() is not None + + if not exists: + guess = guessit(file) + # print(f"\n {guess}") + title = guess["title"] + if "episode" not in guess: + season = guess["season"] + if isinstance(guess["season"], list): + season, episode = guess["season"] + else: + season = guess["season"] + episode = int(guess["episode_title"]) + else: + season = guess["season"] + episode = guess["episode"] + + seasonIndex = season + originalFile = file + episodeIndex = episode + originalSerieTitle = title + serie_modified_time = 0 + series = TV() + show = Search().tv_shows(title) + res = show[0] + serie = res.name + serie_id = res.id + details = series.details(serie_id) + episodeGroups = series.episode_groups(serie_id).results + serieEpisodes = [] + serieSeasons = [] + + for file in allFiles: + guess = guessit(file) + serie = guess["title"] + season = guess["season"] + if isinstance(season, list): + season, episode = guess["season"] + season = int(season) + if serie == originalSerieTitle: + serieEpisodes.append(file) + if season not in serieSeasons: + serieSeasons.append(season) + + file = originalFile + + defaultNbOfSeasons = details.number_of_seasons + 
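# Together with number_of_episodes below, these TMDB totals are compared against what was found on disk + 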
defaultNbOfEpisodes = details.number_of_episodes + + nbSeasons = len(serieSeasons) + nbEpisodes = len(serieEpisodes) + + season_api = None + season_id = None + + if nbEpisodes <= defaultNbOfEpisodes and nbSeasons <= defaultNbOfSeasons: + for seasontmdb in details.seasons: + if str(seasontmdb.season_number) == str(seasonIndex): + season_id = seasontmdb.id + season_api = seasontmdb + break + elif len(episodeGroups) > 0: + for group in episodeGroups: + groupNbEpisodes = group.episode_count + groupNbSeasons = group.group_count + if nbEpisodes <= groupNbEpisodes and nbSeasons <= groupNbSeasons: + theGroup = Group() + seasonsInfo = theGroup.details(group.id).groups + for season in seasonsInfo: + season["season_number"] = season["order"] + season["episode_count"] = len(season["episodes"]) + season["air_date"] = season["episodes"][0]["air_date"] + season["overview"] = "" + season["poster_path"] = season["episodes"][0]["still_path"] + + season_api = seasonsInfo[seasonIndex - 1] + season_id = season_api["id"] + else: + for seasontmdb in details.seasons: + if str(seasontmdb.season_number) == str(seasonIndex): + season_id = seasontmdb.id + season_api = seasontmdb + break + + serieExists = Series.query.filter_by(id=serie_id).first() is not None + if not serieExists: + name = res.name + cover = f"https://image.tmdb.org/t/p/original{res.poster_path}" + banner = f"https://image.tmdb.org/t/p/original{res.backdrop_path}" + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Cover.png"): + with open(f"{IMAGES_PATH}/{serie_id}_Cover.png", "wb") as f: + f.write(image_requests.get(cover).content) + try: + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img.save(f"{IMAGES_PATH}/{serie_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{serie_id}_Cover.png") + img.close() + except Exception as e: + print(f"Error with the image of the {serie}:\n{e}") + pass + + new_banner = f"{IMAGES_PATH}/{serie_id}_Banner.webp" + if not os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): + with open(f"{IMAGES_PATH}/{serie_id}_Banner.png", "wb") as f: + f.write(image_requests.get(banner).content) + + if os.path.exists(f"{IMAGES_PATH}/{serie_id}_Banner.png"): + img = Image.open(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img.save(f"{IMAGES_PATH}/{serie_id}_Banner.webp", "webp") + os.remove(f"{IMAGES_PATH}/{serie_id}_Banner.png") + img.close() + else: + new_banner = f"{IMAGES_PATH}/{serie_id}_Banner.png" + + cover = f"{IMAGES_PATH}/{serie_id}_Cover.webp" + description = res["overview"] + note = res.vote_average + date = res.first_air_date + cast = details.credits.cast + runTime = details.episode_run_time + duration = "" + for i in range(len(runTime)): + if i != len(runTime) - 1: + duration += f"{str(runTime[i])}:" + else: + duration += f"{str(runTime[i])}" + serieGenre = details.genres + bandeAnnonce = details.videos.results + bande_annonce_url = "" + if len(bandeAnnonce) > 0: + for video in bandeAnnonce: + bandeAnnonceType = video.type + bandeAnnonceHost = video.site + bandeAnnonceKey = video.key + if bandeAnnonceType == "Trailer" or len(bandeAnnonce) == 1: + try: + bande_annonce_url = ( + websites_trailers[bandeAnnonceHost] + + bandeAnnonceKey + ) + break + except KeyError: + bande_annonce_url = "Unknown" + + genreList = [] + for genre in serieGenre: + genreList.append(str(genre.name)) + genreList = ",".join(genreList) + newCast = [] + cast = list(cast)[:5] + for actor in cast: + actor_id = actor.id + actorImage = ( + f"https://image.tmdb.org/t/p/original{actor.profile_path}" + ) + if not 
os.path.exists(f"{IMAGES_PATH}/Actor_{actor_id}.webp"): + with open(f"{IMAGES_PATH}/Actor_{actor_id}.png", "wb") as f: + f.write(image_requests.get(actorImage).content) + img = Image.open(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img.save(f"{IMAGES_PATH}/Actor_{actor_id}.webp", "webp") + os.remove(f"{IMAGES_PATH}/Actor_{actor_id}.png") + img.close() + + actorImage = f"{IMAGES_PATH}/Actor_{actor_id}.webp" + actor.profile_path = str(actorImage) + thisActor = actor_id + newCast.append(thisActor) + + person = Person() + p = person.details(actor.id) + exists = ( + Actors.query.filter_by(actor_id=actor.id).first() is not None + ) + if not exists: + actor = Actors( + name=actor.name, + actor_id=actor.id, + actor_image=actorImage, + actor_description=p.biography, + actor_birth_date=p.birthday, + actor_birth_place=p.place_of_birth, + actor_programs=f"{serie_id}", + ) + DB.session.add(actor) + DB.session.commit() + else: + actor = Actors.query.filter_by(actor_id=actor.id).first() + actor.actor_programs = f"{actor.actor_programs} {serie_id}" + DB.session.commit() + + newCast = newCast[:5] + newCast = ",".join([str(i) for i in newCast]) + isAdult = str(details["adult"]) + serieObject = Series( + id=serie_id, + name=name, + original_name=originalSerieTitle, + genre=genreList, + duration=duration, + description=description, + cast=newCast, + bande_annonce_url=bande_annonce_url, + cover=cover, + banner=new_banner, + note=note, + date=date, + serie_modified_time=serie_modified_time, + adult=isAdult, + library_name=library_name, + ) + DB.session.add(serieObject) + DB.session.commit() + + # print(f"Pour {file}, serie_id = {serie_id} et season_id = {season_id}") + + seasonExists = ( + Seasons.query.filter_by(serie=serie_id, season_id=season_id).first() + is not None + ) + + if season_api and not seasonExists: + season = season_api + releaseDate = season.air_date + episodes_number = season.episode_count + season_number = season.season_number + season_name = season.name + season_description = season.overview + seasonPoster = season.poster_path + + savedModifiedTime = 0 + + season_cover_path = f"https://image.tmdb.org/t/p/original{seasonPoster}" + if not os.path.exists(f"{IMAGES_PATH}/{season_id}_Cover.png"): + with open(f"{IMAGES_PATH}/{season_id}_Cover.png", "wb") as f: + f.write(image_requests.get(season_cover_path).content) + try: + img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") + img.save(f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp") + os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") + season_cover_path = f"{IMAGES_PATH}/{season_id}_Cover.webp" + img.close() + except Exception: + with open(f"{IMAGES_PATH}/{season_id}_Cover.png", "wb") as f: + f.write(image_requests.get(season_cover_path).content) + try: + img = Image.open(f"{IMAGES_PATH}/{season_id}_Cover.png") + img.save( + f"{IMAGES_PATH}/{season_id}_Cover.webp", "webp" + ) + os.remove(f"{IMAGES_PATH}/{season_id}_Cover.png") + season_cover_path = f"{IMAGES_PATH}/{season_id}_Cover.webp" + img.close() + except Exception: + season_cover_path = "/static/img/brokenImage.png" + + try: + modified_date = os.path.getmtime(f"{allSeriesPath}{slug}") + except Exception: + modified_date = 0 + + seasonObject = Seasons( + serie=serie_id, + season_id=season_id, + season_name=season_name, + season_description=season_description, + cover=season_cover_path, + season_number=season_number, + episodes_number=episodes_number, + release=releaseDate, + modified_date=modified_date, + number_of_episode_in_folder=0, + ) + + DB.session.add(seasonObject) + 
DB.session.commit() + + bigSeason = season_api + + showEpisode = Episode() + season_number = seasonIndex + serie_id, season_number, episodeIndex = ( + str(serie_id), + str(season_number), + str(episodeIndex), + ) + + try: + exists = ( + Episodes.query.filter_by( + episode_number=episodeIndex, season_id=season_id + ).first() + is not None + ) + except sqlalchemy.exc.PendingRollbackError: + DB.session.rollback() + exists = ( + Episodes.query.filter_by( + episode_number=episodeIndex, season_id=season_id + ).first() + is not None + ) + if not exists: + if isinstance(season_id, int) or season_id.isnumeric(): + showEpisode = Episode() + episodeDetails = showEpisode.details( + serie_id, season_number, episodeIndex + ) + realEpisodeName = episodeDetails.name + episodeInfo = showEpisode.details( + serie_id, season_number, episodeIndex + ) + episode_id = episodeInfo.id + else: + episodeInfo = bigSeason["episodes"][int(episodeIndex) - 1] + episode_id = episodeInfo["id"] + realEpisodeName = episodeInfo["name"] + + coverEpisode = ( + f"https://image.tmdb.org/t/p/original{episodeInfo.still_path}" + ) + + if not os.path.exists( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" + ): + with open( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png", "wb" + ) as f: + f.write(image_requests.get(coverEpisode).content) + try: + img = Image.open( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" + ) + img.save( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp", "webp" + ) + os.remove(f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png") + coverEpisode = ( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.webp" + ) + img.close() + except Exception: + coverEpisode = ( + f"{IMAGES_PATH}/{season_id}_{episode_id}_Cover.png" + ) + try: + exists = ( + Episodes.query.filter_by(episode_id=episode_id).first() + is not None + ) + except sqlalchemy.exc.PendingRollbackError: + DB.session.rollback() + exists = ( + Episodes.query.filter_by(episode_id=episode_id).first() + is not None + ) + if not exists: + # Mprint(f"Pour le fichier {file}, j'ai trouvé : \n - episode_number: {episodeIndex} \n - season_id: {season_id} \n - Serie: {serie_id} \n - Episode ID: {episode_id}") + + episodeData = Episodes( + episode_id=episode_id, + episode_name=realEpisodeName, + season_id=season_id, + episode_number=episodeIndex, + episode_description=episodeInfo.overview, + episode_cover_path=coverEpisode, + release_date=episodeInfo.air_date, + slug=slug, + intro_start=0.0, + intro_end=0.0, + ) + thisSeason = Seasons.query.filter_by(season_id=season_id).first() + thisSeason.number_of_episode_in_folder += 1 + try: + DB.session.add(episodeData) + DB.session.commit() + except Exception: + DB.session.rollback() + DB.session.add(episodeData) + DB.session.commit() + + allSeriesInDB = Series.query.all() + allSeriesInDB = [ + serie.original_name + for serie in allSeriesInDB + if serie.library_name == library_name + ] + + for serie in allSeriesInDB: + serie_id = Series.query.filter_by(original_name=serie).first().id + allSeasons = Seasons.query.filter_by(serie=serie_id).all() + if serie not in allSeries: + for season in allSeasons: + season_id = season.season_id + allEpisodes = Episodes.query.filter_by(season_id=season_id).all() + for episode in allEpisodes: + if not os.path.exists(episode.slug): + try: + DB.session.delete(episode) + DB.session.commit() + except Exception: + DB.session.rollback() + DB.session.delete(episode) + DB.session.commit() + + for season in allSeasons: + season_id = season.season_id + allEpisodes = 
Episodes.query.filter_by(season_id=season_id).all()
+            if len(allEpisodes) == 0:
+                try:
+                    DB.session.delete(season)
+                    DB.session.commit()
+                except Exception:
+                    DB.session.rollback()
+                    DB.session.delete(season)
+                    DB.session.commit()
+        allSeasons = Seasons.query.filter_by(serie=serie_id).all()
+        if len(allSeasons) == 0:
+            try:
+                DB.session.delete(Series.query.filter_by(id=serie_id).first())
+                DB.session.commit()
+            except Exception:
+                DB.session.rollback()
+                DB.session.delete(Series.query.filter_by(id=serie_id).first())
+                DB.session.commit()
+
+
+def getGames(library_name):
+    allGamesPath = Libraries.query.filter_by(lib_name=library_name).first().lib_folder
+    try:
+        allConsoles = [
+            name
+            for name in os.listdir(allGamesPath)
+            if os.path.isdir(path_join(allGamesPath, name))
+            and not name.endswith((".rar", ".zip", ".part"))
+        ]
+    except Exception:
+        return
+
+    # Iterate over a copy: removing items from the list being iterated skips entries
+    for console in list(allConsoles):
+        if os.listdir(f"{allGamesPath}/{console}") == []:
+            allConsoles.remove(console)
+    saidPS1 = False
+    supportedConsoles = [
+        "3DO",
+        "Amiga",
+        "Atari 2600",
+        "Atari 5200",
+        "Atari 7800",
+        "Atari Jaguar",
+        "Atari Lynx",
+        "GB",
+        "GBA",
+        "GBC",
+        "N64",
+        "NDS",
+        "NES",
+        "SNES",
+        "Neo Geo Pocket",
+        "PSX",
+        "Sega 32X",
+        "Sega CD",
+        "Sega Game Gear",
+        "Sega Master System",
+        "Sega Mega Drive",
+        "Sega Saturn",
+        "PS1",
+    ]
+    supportedFileTypes = [
+        ".zip",
+        ".adf",
+        ".adz",
+        ".dms",
+        ".fdi",
+        ".ipf",
+        ".hdf",
+        ".lha",
+        ".slave",
+        ".info",
+        ".cdd",
+        ".nrg",
+        ".mds",
+        ".chd",
+        ".uae",
+        ".m3u",
+        ".a26",
+        ".a52",
+        ".a78",
+        ".j64",
+        ".lnx",
+        ".gb",
+        ".gba",
+        ".gbc",
+        ".n64",
+        ".nds",
+        ".nes",
+        ".ngp",
+        ".psx",
+        ".sfc",
+        ".smc",
+        ".smd",
+        ".32x",
+        ".cd",
+        ".gg",
+        ".md",
+        ".sat",
+        ".sms",
+    ]
+    for console in allConsoles:
+        if console not in supportedConsoles:
+            print(
+                f"{console} is not supported or the console name is incorrect. Here is the list of supported consoles:\n{', '.join(supportedConsoles)}\nIf it is the correct console, rename the folder to one of these names."
+            )
+            break
+
+        printLoading(allConsoles, console, console)
+
+        allFiles = os.listdir(f"{allGamesPath}/{console}")
+        for file in allFiles:
+            # get all games in the db
+            allGamesInDB = Games.query.filter_by(
+                library_name=library_name, console=console
+            ).all()
+            allGamesInDB = [game.slug for game in allGamesInDB]
+            numberOfGamesInDB = len(allGamesInDB)
+            numberOfGamesInFolder = len(allFiles)
+            if numberOfGamesInDB < numberOfGamesInFolder:
+                gameSlug = f"{allGamesPath}/{console}/{file}"
+                exists = Games.query.filter_by(slug=gameSlug).first() is not None
+                if file.endswith(tuple(supportedFileTypes)) and not exists:
+                    newFileName = file
+                    newFileName = re.sub(r"\d{5} - ", "", newFileName)
+                    newFileName = re.sub(r"\d{4} - ", "", newFileName)
+                    newFileName = re.sub(r"\d{3} - ", "", newFileName)
+                    newFileName, extension = os.path.splitext(newFileName)
+                    newFileName = newFileName.rstrip()
+                    newFileName = f"{newFileName}{extension}"
+                    os.rename(
+                        f"{allGamesPath}/{console}/{file}",
+                        f"{allGamesPath}/{console}/{newFileName}",
+                    )
+
+                    printLoading(allFiles, file, newFileName)
+
+                    file = newFileName
+
+                    file, extension = os.path.splitext(file)
+
+                    gameIGDB = searchGame(file, console)
+
+                    if gameIGDB is not None and gameIGDB != {} and not exists:
+                        gameName = gameIGDB["title"]
+                        gameCover = gameIGDB["cover"]
+                        gameDescription = gameIGDB["description"]
+                        gameNote = gameIGDB["note"]
+                        gameDate = gameIGDB["date"]
+                        gameGenre = gameIGDB["genre"]
+                        game_id = gameIGDB["id"]
+                    else:
+                        gameName = file
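+                        # No IGDB match: keep the raw filename as the title and use placeholder metadata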
gameCover = "/static/img/broken.webp" + gameDescription = "" + gameNote = 0 + gameDate = "" + gameGenre = "" + game_id = str(uuid.uuid4()) + + gameRealTitle = newFileName + gameConsole = console + + game = Games( + console=gameConsole, + id=game_id, + title=gameName, + real_title=gameRealTitle, + cover=gameCover, + description=gameDescription, + note=gameNote, + date=gameDate, + genre=gameGenre, + slug=gameSlug, + library_name=library_name, + ) + DB.session.add(game) + DB.session.commit() + + elif console == "PS1" and file.endswith(".cue") and not exists: + if not saidPS1: + print( + "You need to zip all our .bin files and the .cue file in one .zip file to being able to play it" + ) + saidPS1 = True + + value = config["ChocolateSettings"]["compressPS1Games"] + if value.lower() == "true": + index = allFiles.index(file) - 1 + + allBins = [] + while allFiles[index].endswith(".bin"): + allBins.append(allFiles[index]) + index -= 1 + + fileName, extension = os.path.splitext(file) + with zipfile.ZipFile( + f"{allGamesPath}/{console}/{fileName}.zip", "w" + ) as zipObj: + for binFiles in allBins: + zipObj.write( + f"{allGamesPath}/{console}/{binFiles}", binFiles + ) + zipObj.write(f"{allGamesPath}/{console}/{file}", file) + for binFiles in allBins: + os.remove(f"{allGamesPath}/{console}/{binFiles}") + os.remove(f"{allGamesPath}/{console}/{file}") + file = f"{fileName}.zip" + newFileName = file + newFileName = re.sub(r"\d{5} - ", "", newFileName) + newFileName = re.sub(r"\d{4} - ", "", newFileName) + newFileName = re.sub(r"\d{3} - ", "", newFileName) + newFileName, extension = os.path.splitext(newFileName) + newFileName = newFileName.rstrip() + newFileName = f"{newFileName}{extension}" + os.rename( + f"{allGamesPath}/{console}/{file}", + f"{allGamesPath}/{console}/{newFileName}", + ) + file = newFileName + while ".." 
+                            newFileName = newFileName.replace("..", ".")
+                        try:
+                            os.rename(
+                                f"{allGamesPath}/{console}/{file}",
+                                f"{allGamesPath}/{console}/{newFileName}",
+                            )
+                        except FileExistsError:
+                            os.remove(f"{allGamesPath}/{console}/{file}")
+                        file, extension = os.path.splitext(file)
+
+                        gameIGDB = searchGame(file, console)
+                        if gameIGDB is not None and gameIGDB != {}:
+                            gameName = gameIGDB["title"]
+                            gameRealTitle = newFileName
+                            gameCover = gameIGDB["cover"]
+
+                            # Download the cover under the same name that is opened below
+                            with open(
+                                f"{IMAGES_PATH}/{console}_{gameRealTitle}.png", "wb"
+                            ) as f:
+                                f.write(image_requests.get(gameCover).content)
+                            gameCover = f"{IMAGES_PATH}/{console}_{gameRealTitle}.png"
+                            img = Image.open(gameCover)
+                            img.save(
+                                f"{IMAGES_PATH}/{console}_{gameRealTitle}.webp", "webp"
+                            )
+                            os.remove(gameCover)
+                            img.close()
+                            gameCover = f"{IMAGES_PATH}/{console}_{gameRealTitle}.webp"
+
+                            gameDescription = gameIGDB["description"]
+                            gameNote = gameIGDB["note"]
+                            gameDate = gameIGDB["date"]
+                            gameGenre = gameIGDB["genre"]
+                            game_id = gameIGDB["id"]
+                            gameConsole = console
+                            gameSlug = f"{allGamesPath}/{console}/{newFileName}"
+                            game = Games.query.filter_by(slug=gameSlug).first()
+                            print(game)
+                            if not game:
+                                game = Games(
+                                    console=gameConsole,
+                                    id=game_id,
+                                    title=gameName,
+                                    real_title=gameRealTitle,
+                                    cover=gameCover,
+                                    description=gameDescription,
+                                    note=gameNote,
+                                    date=gameDate,
+                                    genre=gameGenre,
+                                    slug=gameSlug,
+                                    library_name=library_name,
+                                )
+                                DB.session.add(game)
+                                DB.session.commit()
+                elif not file.endswith(".bin") and not exists:
+                    print(
+                        f"{file} is not supported. Here is the list of supported file types:\n{', '.join(supportedFileTypes)}"
+                    )
+        gamesInDb = Games.query.filter_by(console=console).all()
+        gamesInDb = [game.real_title for game in gamesInDb]
+        for game in gamesInDb:
+            if game not in allFiles:
+                game = Games.query.filter_by(console=console, real_title=game).first()
+                DB.session.delete(game)
+                DB.session.commit()
+
+
+def getOthersVideos(library, allVideosPath=None):
+    if not allVideosPath:
+        allVideosPath = Libraries.query.filter_by(lib_name=library).first().lib_folder
+        try:
+            allVideos = os.listdir(allVideosPath)
+        except Exception:
+            return
+    else:
+        allVideos = os.listdir(f"{allVideosPath}")
+
+    supportedVideoTypes = [
+        ".mp4",
+        ".webm",
+        ".mkv",
+        ".avi",
+        ".mov",
+        ".wmv",
+        ".flv",
+        ".mpg",
+        ".mpeg",
+    ]
+
+    allDirectories = [
+        video for video in allVideos if os.path.isdir(f"{allVideosPath}/{video}")
+    ]
+    allVideos = [
+        video
+        for video in allVideos
+        if os.path.splitext(video)[1] in supportedVideoTypes
+    ]
+
+    for directory in allDirectories:
+        directoryPath = f"{allVideosPath}/{directory}"
+        getOthersVideos(library, directoryPath)
+
+    for video in allVideos:
+        title, extension = os.path.splitext(video)
+
+        printLoading(allVideos, video, title)
+
+        slug = f"{allVideosPath}/{video}"
+        exists = OthersVideos.query.filter_by(slug=slug).first() is not None
+        if not exists:
+            with open(slug, "rb") as f:
+                video_hash = zlib.crc32(f.read())
+
+            # Convert the hash to a hexadecimal string
+            video_hash_hex = hex(video_hash)[2:]
+
+            # Keep the first 10 characters
+            video_hash = video_hash_hex[:10]
+            videoDuration = length_video(slug)
+            middle = videoDuration // 2
+            banner = f"{IMAGES_PATH}/Other_Banner_{library}_{video_hash}.webp"
+            command = [
+                "ffmpeg",
+                "-i",
+                slug,
+                "-vf",
+                f"select='eq(n,{middle})'",
+                "-vframes",
+                "1",
+                f"{banner}",
+                "-y",
+            ]
+            try:
+                subprocess.run(
+                    command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
+                )
+                if os.path.getsize(f"{banner}") == 0:
+                    generateImage(title, library, f"{banner}")
library, f"{banner}") + banner = f"{IMAGES_PATH}/Other_Banner_{library}_{video_hash}.webp" + except Exception: + banner = "/static/img/broken.webp" + video = OthersVideos( + video_hash=video_hash, + title=title, + slug=slug, + banner=banner, + duration=videoDuration, + library_name=library, + ) + DB.session.add(video) + DB.session.commit() + + for video in OthersVideos.query.filter_by(library_name=library).all(): + path = video.slug + if not os.path.exists(path): + DB.session.delete(video) + DB.session.commit() + + +def getMusics(library): + allMusicsPath = Libraries.query.filter_by(lib_name=library).first().lib_folder + allMusics = os.listdir(allMusicsPath) + + supportedMusicTypes = [".mp3", ".wav", ".ogg", ".flac"] + + allArtists = [ + music for music in allMusics if os.path.isdir(f"{allMusicsPath}/{music}") + ] + + for artist in allArtists: + filesAndDirs = os.listdir(f"{allMusicsPath}/{artist}") + allAlbums = [ + dire + for dire in filesAndDirs + if os.path.isdir(f"{allMusicsPath}/{artist}/{dire}") + ] + allFiles = [ + file + for file in filesAndDirs + if os.path.isfile(f"{allMusicsPath}/{artist}/{file}") + and os.path.splitext(file)[1] in supportedMusicTypes + ] + artist_id = createArtist(artist, library) + artistName = artist + albumsInDB = Albums.query.filter_by(artist_id=artist_id).all() + tracksInDB = Tracks.query.filter_by(artist_id=artist_id).all() + albumsInDB = len([album for album in albumsInDB]) + tracksInDB = len([track for track in tracksInDB]) + if albumsInDB == len(allAlbums) and tracksInDB == len(allFiles): + continue + + startPath = f"{allMusicsPath}/{artist}" + + for album in allAlbums: + albumGuessedData = guessit(album) + if "title" in albumGuessedData: + albumName = albumGuessedData["title"] + else: + albumName, extension = os.path.splitext(album) + + allTracks = os.listdir(f"{startPath}/{album}") + allTracks = [ + track + for track in allTracks + if os.path.splitext(track)[1] in supportedMusicTypes + ] + album_id = createAlbum(albumName, artist_id, allTracks, library) + + for track in allTracks: + slug = f"{startPath}/{album}/{track}" + + exists = Tracks.query.filter_by(slug=slug).first() is not None + if exists: + continue + + title, extension = os.path.splitext(track) + printLoading(allTracks, track, title) + + tags = TinyTag.get(slug, image=True) + + image = tags.get_image() + imagePath = f"{IMAGES_PATH}/Album_{album_id}.webp" + if image is not None: + if not os.path.exists(imagePath): + img = Image.open(io.BytesIO(image)) + img.save(imagePath, "webp") + img.close() + elif not os.path.exists(imagePath): + print(f"L'album {album} n'a pas d'image") + getAlbumImage(album, imagePath) + + if tags.title is not None and tags.title != "" and tags.title != " ": + title = tags.title + else: + guessedData = guessit(title) + + title = "" + + if "title" in guessedData: + title = guessedData["title"] + if title.isdigit(): + title = guessedData["alternative_title"] + else: + if isinstance("episode", list) and "season" in guessedData: + title = f"{guessedData['season']}{' '.join(guessedData['episode'][1])}" + elif "episode" in guessedData and "season" in guessedData: + title = f"{guessedData['season']}{guessedData['episode']}" + + if "release_group" in guessedData: + title += f" ({guessedData['release_group']}" + + imagePath = imagePath.replace(dir_path, "") + + track = Tracks( + name=title, + slug=slug, + album_id=album_id, + artist_id=artist_id, + duration=tags.duration, + cover=imagePath, + library_name=library, + ) + DB.session.add(track) + DB.session.commit() + + for 
+        for track in allFiles:
+            slug = f"{startPath}/{track}"
+
+            exists = Tracks.query.filter_by(slug=slug).first() is not None
+            if exists:
+                continue
+
+            title, extension = os.path.splitext(track)
+            printLoading(allFiles, track, title)
+
+            tags = TinyTag.get(slug, image=True)
+
+            image = tags.get_image()
+            imagePath = f"{IMAGES_PATH}/Album_{artist_id}.webp"
+            if image is not None:
+                if not os.path.exists(imagePath):
+                    img = Image.open(io.BytesIO(image))
+                    img.save(imagePath, "webp")
+                    img.close()
+            elif not os.path.exists(imagePath):
+                getArtistImage(artistName, imagePath)
+
+            if tags.title is not None and tags.title != "" and tags.title != " ":
+                title = tags.title
+            else:
+                guessedData = guessit(title)
+
+                title = ""
+
+                if "title" in guessedData:
+                    title = guessedData["title"]
+                    if title.isdigit():
+                        title = guessedData["alternative_title"]
+                else:
+                    if (
+                        "episode" in guessedData
+                        and isinstance(guessedData["episode"], list)
+                        and "season" in guessedData
+                    ):
+                        title = f"{guessedData['season']}{' '.join(guessedData['episode'][1])}"
+                    elif "episode" in guessedData and "season" in guessedData:
+                        title = f"{guessedData['season']}{guessedData['episode']}"
+
+                if "release_group" in guessedData:
+                    title += f" ({guessedData['release_group']})"
+
+            imagePath = imagePath.replace(dir_path, "")
+
+            track = Tracks(
+                name=title,
+                slug=slug,
+                album_id=0,
+                artist_id=artist_id,
+                duration=tags.duration,
+                cover=imagePath,
+                library_name=library,
+            )
+            DB.session.add(track)
+            DB.session.commit()
+
+    allTracks = Tracks.query.filter_by(library_name=library).all()
+    for track in allTracks:
+        path = track.slug
+        if not os.path.exists(path):
+            DB.session.delete(track)
+            DB.session.commit()
+
+    allAlbums = Albums.query.filter_by(library_name=library).all()
+    for album in allAlbums:
+        tracks = album.tracks
+        if tracks == "":
+            DB.session.delete(album)
+            DB.session.commit()
+            continue
+
+    allArtists = Artists.query.filter_by(library_name=library).all()
+    for artist in allArtists:
+        artist_id = artist.id
+        albums = Albums.query.filter_by(artist_id=artist_id).all()
+        tracks = Tracks.query.filter_by(artist_id=artist_id).all()
+        if len(albums) == 0 and len(tracks) == 0:
+            DB.session.delete(artist)
+            DB.session.commit()
+            continue
+
+
+def getBooks(library):
+    allBooks = Libraries.query.filter_by(lib_name=library)
+    allBooksPath = allBooks.first().lib_folder
+
+    allBooks = os.walk(allBooksPath)
+    books = []
+
+    for root, dirs, files in allBooks:
+        for file in files:
+            path = f"{root}/{file}".replace("\\", "/")
+
+            if file.endswith((".pdf", ".epub", ".cbz", ".cbr")):
+                books.append(path)
+
+    allBooks = books
+
+    imageFunctions = {
+        ".pdf": getPDFCover,
+        ".epub": getEPUBCover,
+        ".cbz": getCBZCover,
+        ".cbr": getCBRCover,
+    }
+
+    for book in allBooks:
+        name, extension = os.path.splitext(book)
+        name = name.split("/")[-1]
+
+        printLoading(allBooks, book, name)
+
+        slug = f"{book}"
+
+        exists = Books.query.filter_by(slug=slug).first() is not None
+        if not exists and not os.path.isdir(slug):
+            if extension in imageFunctions.keys():
+                book_cover, book_type = "temp", "temp"
+                book = Books(
+                    title=name,
+                    slug=slug,
+                    book_type=book_type,
+                    cover=book_cover,
+                    library_name=library,
+                )
+                DB.session.add(book)
+                DB.session.commit()
+                book_id = book.id
+                book_cover, book_type = imageFunctions[extension](slug, name, book_id)
+                book.cover = book_cover
+                book.book_type = book_type
+                DB.session.commit()
+    allBooksInDb = Books.query.filter_by(library_name=library).all()
+    for book in allBooksInDb:
+        if not os.path.exists(book.slug):
+            DB.session.delete(book)
+            DB.session.commit()
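+
+
+# Book-cover helpers: each one returns a (cover_path, book_type) tuple for a single format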
+def getPDFCover(path, name, id):
+    pdfDoc = fitz.open(path)
+    # Grab the first page
+    page = pdfDoc[0]
+    # Render the page to an image
+    pix = page.get_pixmap()
+    # Save the image
+    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+    if os.path.exists(f"{IMAGES_PATH}/Books_Banner_{id}.webp"):
+        os.remove(f"{IMAGES_PATH}/Books_Banner_{id}.webp")
+
+    img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp")
+    path = f"{IMAGES_PATH}/Books_Banner_{id}.webp"
+    return path, "PDF"
+
+
+def getEPUBCover(path, name, id):
+    pdfDoc = fitz.open(path)
+    # Grab the first page
+    page = pdfDoc[0]
+    # Render the page to an image
+    pix = page.get_pixmap()
+    # Save the image
+    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+
+    if os.path.exists(f"{IMAGES_PATH}/Books_Banner_{id}.webp"):
+        os.remove(f"{IMAGES_PATH}/Books_Banner_{id}.webp")
+
+    img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp")
+    img.close()
+    pdfDoc.close()
+    path = f"{IMAGES_PATH}/Books_Banner_{id}.webp"
+
+    return path, "EPUB"
+
+
+def getCBZCover(path, name, id):
+    try:
+        with zipfile.ZipFile(path, "r") as zip_ref:
+            # Walk through every file inside the CBZ
+            for file in zip_ref.filelist:
+                # Check whether the file is an image
+                if file.filename.endswith(".jpg") or file.filename.endswith(".png"):
+                    # Open the image file
+                    with zip_ref.open(file) as image_file:
+                        img = Image.open(io.BytesIO(image_file.read()))
+                        img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp")
+                        img.close()
+                    break
+                elif file.filename.endswith("/"):
+                    with zip_ref.open(file) as image_file:
+                        for file in zip_ref.filelist:
+                            if file.filename.endswith(".jpg") or file.filename.endswith(
+                                ".png"
+                            ):
+                                # Open the image file
+                                with zip_ref.open(file) as image_file:
+                                    img = Image.open(io.BytesIO(image_file.read()))
+                                    # Save the image
+                                    img.save(
+                                        f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp"
+                                    )
+                                    img.close()
+                                break
+        return f"{IMAGES_PATH}/Books_Banner_{id}.webp", "CBZ"
+    except Exception:
+        return getCBRCover(path, name, id)
+
+
+def getCBRCover(path, name, id):
+    name = name.replace(" ", "_").replace("#", "")
+    try:
+        with rarfile.RarFile(path, "r") as rar_ref:
+            # Walk through every file inside the CBR
+            for file in rar_ref.infolist():
+                # Check whether the file is an image
+                if file.filename.endswith(".jpg") or file.filename.endswith(".png"):
+                    # Open the image file
+                    with rar_ref.open(file) as image_file:
+                        img = Image.open(io.BytesIO(image_file.read()))
+                        # Save the image
+                        img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp")
+                        img.close()
+                    break
+                elif file.filename.endswith("/"):
+                    with rar_ref.open(file) as image_file:
+                        img = Image.open(io.BytesIO(image_file.read()))
+                        # Save the image
+                        img.save(f"{IMAGES_PATH}/Books_Banner_{id}.webp", "webp")
+                        img.close()
+                    break
+
+        return f"{IMAGES_PATH}/Books_Banner_{id}.webp", "CBR"
+    except rarfile.NotRarFile:
+        return getCBZCover(path, name, id)
diff --git a/src/chocolate_app/send_languages_to_weblate.py b/src/chocolate_app/send_languages_to_weblate.py
index 8d5e278..36308fa 100644
--- a/src/chocolate_app/send_languages_to_weblate.py
+++ b/src/chocolate_app/send_languages_to_weblate.py
@@ -1,49 +1,49 @@
-import os
-import requests
-
-directory = r'G:\Projets\Chocolate\src\chocolate\static\lang'
-api_url_template = "https://hosted.weblate.org/api/translations/chocolate/{component}/{lang}/file/"
-token = "wlu_Hxw8cIkWrl0DXIPWwFW3kGbi7iMKAk9cckUP"
-author_name = "imprevisible"
-author_email = "impr.visible@gmail.com" -upload_method = "translate" -conflit = "ignore" -fuzzy = "process" - -def upload_file_to_weblate(file_path, component, lang): - headers = { - "Authorization": f"Token {token}" - } - - url = api_url_template.format(component=component, lang=lang) - with open(file_path, 'rb') as file: - form = { - 'file': (os.path.basename(file_path), file), - 'email': author_email, - 'author': author_name, - } - data = { - 'message': upload_method, - 'conflict': conflit, - 'fuzzy': fuzzy, - } - response = requests.post(url, headers=headers, files=form, data=data) - - if response.status_code == 200: - print(f"Uploaded {file_path} to Weblate for language {lang} ({response.text})") - else: - print(f"Failed to upload {file_path} to Weblate for language {lang}. Status code: {response.status_code}") - print(response.text) - print(response) - -def process_files_in_directory(directory): - component = "translation" # Replace with the actual component slug - lang_files = [file for file in os.listdir(directory) if file.endswith('.json')] - for lang_file in lang_files: - file_path = os.path.join(directory, lang_file) - lang = os.path.splitext(lang_file)[0] - print(f"Processing {file_path} for language {lang}") - upload_file_to_weblate(file_path, component, lang) - -if __name__ == "__main__": - process_files_in_directory(directory) +import os +import requests + +directory = r'G:\Projets\Chocolate\src\chocolate\static\lang' +api_url_template = "https://hosted.weblate.org/api/translations/chocolate/{component}/{lang}/file/" +token = "wlu_Hxw8cIkWrl0DXIPWwFW3kGbi7iMKAk9cckUP" +author_name = "imprevisible" +author_email = "impr.visible@gmail.com" +upload_method = "translate" +conflit = "ignore" +fuzzy = "process" + +def upload_file_to_weblate(file_path, component, lang): + headers = { + "Authorization": f"Token {token}" + } + + url = api_url_template.format(component=component, lang=lang) + with open(file_path, 'rb') as file: + form = { + 'file': (os.path.basename(file_path), file), + 'email': author_email, + 'author': author_name, + } + data = { + 'message': upload_method, + 'conflict': conflit, + 'fuzzy': fuzzy, + } + response = requests.post(url, headers=headers, files=form, data=data) + + if response.status_code == 200: + print(f"Uploaded {file_path} to Weblate for language {lang} ({response.text})") + else: + print(f"Failed to upload {file_path} to Weblate for language {lang}. Status code: {response.status_code}") + print(response.text) + print(response) + +def process_files_in_directory(directory): + component = "translation" # Replace with the actual component slug + lang_files = [file for file in os.listdir(directory) if file.endswith('.json')] + for lang_file in lang_files: + file_path = os.path.join(directory, lang_file) + lang = os.path.splitext(lang_file)[0] + print(f"Processing {file_path} for language {lang}") + upload_file_to_weblate(file_path, component, lang) + +if __name__ == "__main__": + process_files_in_directory(directory) diff --git a/src/chocolate_app/tables.py b/src/chocolate_app/tables.py index 57f68f4..357ee29 100644 --- a/src/chocolate_app/tables.py +++ b/src/chocolate_app/tables.py @@ -1,360 +1,360 @@ -from flask_login import UserMixin -from werkzeug.security import check_password_hash, generate_password_hash -from time import time - -from . 
import DB - -class Users(DB.Model, UserMixin): - id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) - name = DB.Column(DB.String(255), unique=True) - password = DB.Column(DB.String(255)) - profil_picture = DB.Column(DB.String(255)) - account_type = DB.Column(DB.String(255)) - - def __init__(self, name, password, profil_picture, account_type): - self.name = name - if password is not None and password != "": - self.password = generate_password_hash(password) - else: - self.password = None - self.profil_picture = profil_picture - self.account_type = account_type - - def __repr__(self) -> str: - return f"" - - def verify_password(self, pwd): - if not self.password: - return True - return check_password_hash(self.password, pwd) - - -class Movies(DB.Model): - id = DB.Column(DB.Integer, primary_key=True) - title = DB.Column(DB.String(255), primary_key=True) - real_title = DB.Column(DB.String(255), primary_key=True) - cover = DB.Column(DB.String(255)) - banner = DB.Column(DB.String(255)) - slug = DB.Column(DB.String(255)) - description = DB.Column(DB.String(2550)) - note = DB.Column(DB.String(255)) - date = DB.Column(DB.String(255)) - genre = DB.Column(DB.String(255)) - duration = DB.Column(DB.String(255)) - cast = DB.Column(DB.String(255)) - bande_annonce_url = DB.Column(DB.String(255)) - adult = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - alternatives_names = DB.Column(DB.Text) - file_date = DB.Column(DB.Float) - - def __repr__(self) -> str: - return f"" - - -class Series(DB.Model): - id = DB.Column(DB.Integer, primary_key=True) - name = DB.Column(DB.String(255), primary_key=True) - original_name = DB.Column(DB.String(255), primary_key=True) - genre = DB.Column(DB.String(255)) - duration = DB.Column(DB.String(255)) - description = DB.Column(DB.String(2550)) - cast = DB.Column(DB.String(255)) - bande_annonce_url = DB.Column(DB.String(255)) - cover = DB.Column(DB.String(255)) - banner = DB.Column(DB.String(255)) - note = DB.Column(DB.String(255)) - date = DB.Column(DB.String(255)) - serie_modified_time = DB.Column(DB.Float) - library_name = DB.Column(DB.String(255)) - adult = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Seasons(DB.Model): - serie = DB.Column(DB.Integer, nullable=False) - season_id = DB.Column(DB.Integer, primary_key=True) - season_number = DB.Column(DB.Integer, primary_key=True) - release = DB.Column(DB.String(255)) - episodes_number = DB.Column(DB.String(255)) - season_name = DB.Column(DB.String(255)) - season_description = DB.Column(DB.Text) - cover = DB.Column(DB.String(255)) - modified_date = DB.Column(DB.Float) - number_of_episode_in_folder = DB.Column(DB.Integer) - - def __repr__(self) -> str: - return f"" - - -class Episodes(DB.Model): - season_id = DB.Column(DB.Integer, nullable=False) - episode_id = DB.Column(DB.Integer, primary_key=True) - episode_name = DB.Column(DB.String(255), primary_key=True) - episode_number = DB.Column(DB.Integer, primary_key=True) - episode_description = DB.Column(DB.Text) - episode_cover_path = DB.Column(DB.String(255)) - release_date = DB.Column(DB.String(255)) - slug = DB.Column(DB.String(255)) - intro_start = DB.Column(DB.Float) - intro_end = DB.Column(DB.Float) - - def __repr__(self) -> str: - return f"" - - -class Games(DB.Model): - console = DB.Column(DB.String(255), nullable=False) - id = DB.Column(DB.Integer, primary_key=True) - title = DB.Column(DB.String(255), primary_key=True) - real_title = DB.Column(DB.String(255), primary_key=True) - cover = 
DB.Column(DB.String(255)) - description = DB.Column(DB.String(2550)) - note = DB.Column(DB.String(255)) - date = DB.Column(DB.String(255)) - genre = DB.Column(DB.String(255)) - slug = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class OthersVideos(DB.Model): - video_hash = DB.Column(DB.String(255), primary_key=True) - title = DB.Column(DB.String(255), primary_key=True) - slug = DB.Column(DB.String(255)) - banner = DB.Column(DB.String(255)) - duration = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Books(DB.Model): - id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) - title = DB.Column(DB.String(255)) - slug = DB.Column(DB.String(255)) - book_type = DB.Column(DB.String(255)) - cover = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Artists(DB.Model): - """ - Artists model - - ... - - Attributes - ---------- - id : int - artist id - name : str - artist name - cover : str - artist cover path - library_name : str - artist library name - """ - - id = DB.Column(DB.Text, primary_key=True) - name = DB.Column(DB.String(255)) - cover = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Albums(DB.Model): - """ - Albums model - - ... - - Attributes - ---------- - artist_id : int - artist id - id : int - album id - name : str - album name - dir_name : str - album dir name - cover : str - album cover path - tracks : str - album tracks - library_name : str - album library name - """ - - artist_id = DB.Column(DB.Integer, primary_key=True) - id = DB.Column(DB.Integer, primary_key=True) - name = DB.Column(DB.String(255)) - dir_name = DB.Column(DB.String(255)) - cover = DB.Column(DB.String(255)) - tracks = DB.Column(DB.Text) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Tracks(DB.Model): - """ - Tracks model - - ... - - Attributes - ---------- - artist_id : int - artist id - album_id : int - album id - id : int - track id - name : str - track name - slug : str - track slug - duration : int - track duration - cover: str - track cover path - library_name : str - track library name - """ - - artist_id = DB.Column(DB.Integer) - album_id = DB.Column(DB.Integer) - id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) - name = DB.Column(DB.String(255)) - slug = DB.Column(DB.String(255)) - duration = DB.Column(DB.Integer) - cover = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Playlists(DB.Model): - """ - Playlist model - - ... 
- - Attributes - ---------- - user_id : int - user id - id : int - playlist id - name : str - playlist name - tracks : str - playlist tracks - duration : int - playlist duration - cover : str - playlist cover path - library_name : str - playlist library name - """ - - user_id = DB.Column(DB.Integer) - id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) - name = DB.Column(DB.String(255)) - tracks = DB.Column(DB.Text) - duration = DB.Column(DB.Integer) - cover = DB.Column(DB.String(255)) - library_name = DB.Column(DB.String(255)) - - def __repr__(self) -> str: - return f"" - - -class Language(DB.Model): - language = DB.Column(DB.String(255), primary_key=True) - - def __repr__(self) -> str: - return f"" - - -class Actors(DB.Model): - name = DB.Column(DB.String(255), primary_key=True) - actor_id = DB.Column(DB.Integer, primary_key=True) - actor_image = DB.Column(DB.Text) - actor_description = DB.Column(DB.String(2550)) - actor_birth_date = DB.Column(DB.String(255)) - actor_birth_place = DB.Column(DB.String(255)) - actor_programs = DB.Column(DB.Text) - - def __repr__(self) -> str: - return f"" - - -class Libraries(DB.Model): - lib_name = DB.Column(DB.Text, primary_key=True) - lib_image = DB.Column(DB.Text) - lib_type = DB.Column(DB.Text) - lib_folder = DB.Column(DB.Text) - available_for = DB.Column(DB.Text) - - def __repr__(self) -> str: - return f"" - - -# une classe qui stocke le nombre de fois qu'a été joué une musique par un utilisateur -class MusicPlayed(DB.Model): - user_id = DB.Column(DB.Integer, primary_key=True) - music_id = DB.Column(DB.Integer, primary_key=True) - play_count = DB.Column(DB.Integer) - - def __repr__(self) -> str: - return f"" - - -# une classe qui stocle les likes d'un utilisateur -class MusicLiked(DB.Model): - user_id = DB.Column(DB.Integer, primary_key=True) - music_id = DB.Column(DB.Integer, primary_key=True) - liked = DB.Column(DB.Integer) - liked_at = DB.Column(DB.Integer, default=int(time())) - - def __repr__(self) -> str: - return f"" - - -class LatestEpisodeWatched(DB.Model): - user_id = DB.Column(DB.Integer, primary_key=True) - serie_id = DB.Column(DB.Integer, primary_key=True) - episode_id = DB.Column(DB.Integer) - - def __repr__(self) -> str: - return f"" - - -class InviteCodes(DB.Model): - code = DB.Column(DB.String(255), primary_key=True) - - def __repr__(self) -> str: - return f"" - - -class LibrariesMerge(DB.Model): - parent_lib = DB.Column(DB.String(255), primary_key=True) - child_lib = DB.Column(DB.String(255), primary_key=True) - - def __repr__(self) -> str: - return f"" +from flask_login import UserMixin +from werkzeug.security import check_password_hash, generate_password_hash +from time import time + +from . 
import DB + +class Users(DB.Model, UserMixin): + id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) + name = DB.Column(DB.String(255), unique=True) + password = DB.Column(DB.String(255)) + profil_picture = DB.Column(DB.String(255)) + account_type = DB.Column(DB.String(255)) + + def __init__(self, name, password, profil_picture, account_type): + self.name = name + if password is not None and password != "": + self.password = generate_password_hash(password) + else: + self.password = None + self.profil_picture = profil_picture + self.account_type = account_type + + def __repr__(self) -> str: + return f"" + + def verify_password(self, pwd): + if not self.password: + return True + return check_password_hash(self.password, pwd) + + +class Movies(DB.Model): + id = DB.Column(DB.Integer, primary_key=True) + title = DB.Column(DB.String(255), primary_key=True) + real_title = DB.Column(DB.String(255), primary_key=True) + cover = DB.Column(DB.String(255)) + banner = DB.Column(DB.String(255)) + slug = DB.Column(DB.String(255)) + description = DB.Column(DB.String(2550)) + note = DB.Column(DB.String(255)) + date = DB.Column(DB.String(255)) + genre = DB.Column(DB.String(255)) + duration = DB.Column(DB.String(255)) + cast = DB.Column(DB.String(255)) + bande_annonce_url = DB.Column(DB.String(255)) + adult = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + alternatives_names = DB.Column(DB.Text) + file_date = DB.Column(DB.Float) + + def __repr__(self) -> str: + return f"" + + +class Series(DB.Model): + id = DB.Column(DB.Integer, primary_key=True) + name = DB.Column(DB.String(255), primary_key=True) + original_name = DB.Column(DB.String(255), primary_key=True) + genre = DB.Column(DB.String(255)) + duration = DB.Column(DB.String(255)) + description = DB.Column(DB.String(2550)) + cast = DB.Column(DB.String(255)) + bande_annonce_url = DB.Column(DB.String(255)) + cover = DB.Column(DB.String(255)) + banner = DB.Column(DB.String(255)) + note = DB.Column(DB.String(255)) + date = DB.Column(DB.String(255)) + serie_modified_time = DB.Column(DB.Float) + library_name = DB.Column(DB.String(255)) + adult = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Seasons(DB.Model): + serie = DB.Column(DB.Integer, nullable=False) + season_id = DB.Column(DB.Integer, primary_key=True) + season_number = DB.Column(DB.Integer, primary_key=True) + release = DB.Column(DB.String(255)) + episodes_number = DB.Column(DB.String(255)) + season_name = DB.Column(DB.String(255)) + season_description = DB.Column(DB.Text) + cover = DB.Column(DB.String(255)) + modified_date = DB.Column(DB.Float) + number_of_episode_in_folder = DB.Column(DB.Integer) + + def __repr__(self) -> str: + return f"" + + +class Episodes(DB.Model): + season_id = DB.Column(DB.Integer, nullable=False) + episode_id = DB.Column(DB.Integer, primary_key=True) + episode_name = DB.Column(DB.String(255), primary_key=True) + episode_number = DB.Column(DB.Integer, primary_key=True) + episode_description = DB.Column(DB.Text) + episode_cover_path = DB.Column(DB.String(255)) + release_date = DB.Column(DB.String(255)) + slug = DB.Column(DB.String(255)) + intro_start = DB.Column(DB.Float) + intro_end = DB.Column(DB.Float) + + def __repr__(self) -> str: + return f"" + + +class Games(DB.Model): + console = DB.Column(DB.String(255), nullable=False) + id = DB.Column(DB.Integer, primary_key=True) + title = DB.Column(DB.String(255), primary_key=True) + real_title = DB.Column(DB.String(255), primary_key=True) + cover = 
DB.Column(DB.String(255)) + description = DB.Column(DB.String(2550)) + note = DB.Column(DB.String(255)) + date = DB.Column(DB.String(255)) + genre = DB.Column(DB.String(255)) + slug = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class OthersVideos(DB.Model): + video_hash = DB.Column(DB.String(255), primary_key=True) + title = DB.Column(DB.String(255), primary_key=True) + slug = DB.Column(DB.String(255)) + banner = DB.Column(DB.String(255)) + duration = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Books(DB.Model): + id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) + title = DB.Column(DB.String(255)) + slug = DB.Column(DB.String(255)) + book_type = DB.Column(DB.String(255)) + cover = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Artists(DB.Model): + """ + Artists model + + ... + + Attributes + ---------- + id : int + artist id + name : str + artist name + cover : str + artist cover path + library_name : str + artist library name + """ + + id = DB.Column(DB.Text, primary_key=True) + name = DB.Column(DB.String(255)) + cover = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Albums(DB.Model): + """ + Albums model + + ... + + Attributes + ---------- + artist_id : int + artist id + id : int + album id + name : str + album name + dir_name : str + album dir name + cover : str + album cover path + tracks : str + album tracks + library_name : str + album library name + """ + + artist_id = DB.Column(DB.Integer, primary_key=True) + id = DB.Column(DB.Integer, primary_key=True) + name = DB.Column(DB.String(255)) + dir_name = DB.Column(DB.String(255)) + cover = DB.Column(DB.String(255)) + tracks = DB.Column(DB.Text) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Tracks(DB.Model): + """ + Tracks model + + ... + + Attributes + ---------- + artist_id : int + artist id + album_id : int + album id + id : int + track id + name : str + track name + slug : str + track slug + duration : int + track duration + cover: str + track cover path + library_name : str + track library name + """ + + artist_id = DB.Column(DB.Integer) + album_id = DB.Column(DB.Integer) + id = DB.Column(DB.Integer, primary_key=True, autoincrement=True) + name = DB.Column(DB.String(255)) + slug = DB.Column(DB.String(255)) + duration = DB.Column(DB.Integer) + cover = DB.Column(DB.String(255)) + library_name = DB.Column(DB.String(255)) + + def __repr__(self) -> str: + return f"" + + +class Playlists(DB.Model): + """ + Playlist model + + ... 
+
+    Attributes
+    ----------
+    user_id : int
+        user id
+    id : int
+        playlist id
+    name : str
+        playlist name
+    tracks : str
+        playlist tracks
+    duration : int
+        playlist duration
+    cover : str
+        playlist cover path
+    library_name : str
+        playlist library name
+    """
+
+    user_id = DB.Column(DB.Integer)
+    id = DB.Column(DB.Integer, primary_key=True, autoincrement=True)
+    name = DB.Column(DB.String(255))
+    tracks = DB.Column(DB.Text)
+    duration = DB.Column(DB.Integer)
+    cover = DB.Column(DB.String(255))
+    library_name = DB.Column(DB.String(255))
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class Language(DB.Model):
+    language = DB.Column(DB.String(255), primary_key=True)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class Actors(DB.Model):
+    name = DB.Column(DB.String(255), primary_key=True)
+    actor_id = DB.Column(DB.Integer, primary_key=True)
+    actor_image = DB.Column(DB.Text)
+    actor_description = DB.Column(DB.String(2550))
+    actor_birth_date = DB.Column(DB.String(255))
+    actor_birth_place = DB.Column(DB.String(255))
+    actor_programs = DB.Column(DB.Text)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class Libraries(DB.Model):
+    lib_name = DB.Column(DB.Text, primary_key=True)
+    lib_image = DB.Column(DB.Text)
+    lib_type = DB.Column(DB.Text)
+    lib_folder = DB.Column(DB.Text)
+    available_for = DB.Column(DB.Text)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+# a class that stores how many times a user has played a given track
+class MusicPlayed(DB.Model):
+    user_id = DB.Column(DB.Integer, primary_key=True)
+    music_id = DB.Column(DB.Integer, primary_key=True)
+    play_count = DB.Column(DB.Integer)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+# a class that stores a user's likes
+class MusicLiked(DB.Model):
+    user_id = DB.Column(DB.Integer, primary_key=True)
+    music_id = DB.Column(DB.Integer, primary_key=True)
+    liked = DB.Column(DB.Integer)
+    liked_at = DB.Column(DB.Integer, default=int(time()))
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class LatestEpisodeWatched(DB.Model):
+    user_id = DB.Column(DB.Integer, primary_key=True)
+    serie_id = DB.Column(DB.Integer, primary_key=True)
+    episode_id = DB.Column(DB.Integer)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class InviteCodes(DB.Model):
+    code = DB.Column(DB.String(255), primary_key=True)
+
+    def __repr__(self) -> str:
+        return f""
+
+
+class LibrariesMerge(DB.Model):
+    parent_lib = DB.Column(DB.String(255), primary_key=True)
+    child_lib = DB.Column(DB.String(255), primary_key=True)
+
+    def __repr__(self) -> str:
+        return f""
diff --git a/src/chocolate_app/templates/index.html b/src/chocolate_app/templates/index.html
index d01f779..5ee9dd3 100644
--- a/src/chocolate_app/templates/index.html
+++ b/src/chocolate_app/templates/index.html
@@ -1,11 +1 @@
-
-
-
-
-
-    Document
-
-
-
-
-
\ No newline at end of file
+Chocolate
\ No newline at end of file diff --git a/src/chocolate_app/utils/utils.py b/src/chocolate_app/utils/utils.py index efc2082..cf6555e 100644 --- a/src/chocolate_app/utils/utils.py +++ b/src/chocolate_app/utils/utils.py @@ -1,121 +1,121 @@ -import os -import datetime -import json - -from flask import abort - -from chocolate_app import all_auth_tokens, get_dir_path -from chocolate_app.tables import Users, Libraries - -dir_path = get_dir_path() - - -def generate_log(request, component): - method = request.method - - token = request.headers.get("Authorization") - - path = request.path - - try: - data = request.get_json() - except Exception: - data = None - - if token and token in all_auth_tokens: - user = all_auth_tokens[token]["user"] - if user: - try: - user = Users.query.filter_by(name=user).first() - if user: - username = user.name - else: - username = f"token {token}" - except Exception: - username = f"token {token}" - else: - username = f"token {token}" - else: - username = f"Token {token}" - - if username == "Token Bearer null": - username = "Unknown" - - if not data: - message = f"Request {method} at {path} from {username}" - else: - if "password" in data: - data["password"] = "********" - if "image" in data: - data["image"] = "Image as a base64 (to long)" - message = ( - f"Request {method} at {path} from {username} with data: {json.dumps(data)}" - ) - - log("INFO", component, message) - - -def log(log_type, log_composant, log_message): - the_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - log = f"{the_time} - [{log_type}] [{log_composant}] {log_message}\n" - - # if file does not exist, create it - if not os.path.exists(path_join(dir_path, "server.log")): - with open(path_join(dir_path, "server.log"), "w") as logs: - logs.write(log) - return - - with open(path_join(dir_path, "server.log"), "r") as logs: - if log in logs.read(): - return - - with open(path_join(dir_path, "server.log"), "a") as logs: - logs.write(log) - - -def path_join(*args): - return "/".join(args).replace("\\", "/") - - -def check_authorization(request, token, library=None): - if token not in all_auth_tokens: - generate_log(request, "UNAUTHORIZED") - abort(401) - - username = all_auth_tokens[token]["user"] - - if library: - the_lib = Libraries.query.filter_by(lib_name=library).first() - - if not the_lib: - generate_log(request, "ERROR") - abort(404) - - user = Users.query.filter_by(name=username).first() - user_in_the_lib = user_in_lib(user.id, the_lib) - - if not user_in_the_lib: - generate_log(request, "UNAUTHORIZED") - abort(401) - - if the_lib is None or user is None: - generate_log(request, "ERROR") - abort(404) - - -def user_in_lib(user_id, lib): - user = Users.query.filter_by(id=user_id).first() - - if not user: - return False - - user_id = str(user.id) - - if not isinstance(lib, dict): - lib = lib.__dict__ - - available_for = str(lib["available_for"]).split(",") - - if not lib["available_for"] or user_id in available_for: - return True - return False +import os +import datetime +import json + +from flask import abort + +from chocolate_app import all_auth_tokens, get_dir_path, LOG_PATH +from chocolate_app.tables import Users, Libraries + +dir_path = get_dir_path() + + +def generate_log(request, component): + method = request.method + + token = request.headers.get("Authorization") + + path = request.path + + try: + data = request.get_json() + except Exception: + data = None + + if token and token in all_auth_tokens: + user = all_auth_tokens[token]["user"] + if user: + try: + user = 
Users.query.filter_by(name=user).first()
+                if user:
+                    username = user.name
+                else:
+                    username = f"token {token}"
+            except Exception:
+                username = f"token {token}"
+        else:
+            username = f"token {token}"
+    else:
+        username = f"Token {token}"
+
+    if username == "Token Bearer null":
+        username = "Unknown"
+
+    if not data:
+        message = f"Request {method} at {path} from {username}"
+    else:
+        if "password" in data:
+            data["password"] = "********"
+        if "image" in data:
+            data["image"] = "Image as base64 (too long)"
+        message = (
+            f"Request {method} at {path} from {username} with data: {json.dumps(data)}"
+        )
+
+    log("INFO", component, message)
+
+
+def log(log_type, log_composant, log_message):
+    the_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    log = f"{the_time} - [{log_type}] [{log_composant}] {log_message}\n"
+
+    # if the file does not exist, create it
+    if not os.path.exists(LOG_PATH):
+        with open(LOG_PATH, "w") as logs:
+            logs.write(log)
+        return
+
+    # skip lines that are already in the log
+    with open(LOG_PATH, "r") as logs:
+        if log in logs.read():
+            return
+
+    with open(LOG_PATH, "a") as logs:
+        logs.write(log)
+
+
+def path_join(*args):
+    return "/".join(args).replace("\\", "/")
+
+
+def check_authorization(request, token, library=None):
+    if token not in all_auth_tokens:
+        generate_log(request, "UNAUTHORIZED")
+        abort(401)
+
+    username = all_auth_tokens[token]["user"]
+
+    if library:
+        the_lib = Libraries.query.filter_by(lib_name=library).first()
+
+        if not the_lib:
+            generate_log(request, "ERROR")
+            abort(404)
+
+        user = Users.query.filter_by(name=username).first()
+
+        # check the user before dereferencing it, not after
+        if user is None:
+            generate_log(request, "ERROR")
+            abort(404)
+
+        user_in_the_lib = user_in_lib(user.id, the_lib)
+
+        if not user_in_the_lib:
+            generate_log(request, "UNAUTHORIZED")
+            abort(401)
+
+
+def user_in_lib(user_id, lib):
+    user = Users.query.filter_by(id=user_id).first()
+
+    if not user or not lib:
+        return False
+
+    user_id = str(user.id)
+
+    if not isinstance(lib, dict):
+        lib = lib.__dict__
+
+    available_for = str(lib["available_for"]).split(",")
+
+    # an empty available_for means the library is open to every user
+    if not lib["available_for"] or user_id in available_for:
+        return True
+    return False
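For context on how these helpers fit together: check_authorization aborts the request itself on failure, and user_in_lib treats an empty available_for column as "visible to everyone", otherwise expecting a comma-separated list of user ids. A hypothetical route using them might look like the sketch below; the blueprint, endpoint, and response shape are illustrative only, not part of this patch:

from flask import Blueprint, jsonify, request

from chocolate_app.tables import Movies
from chocolate_app.utils.utils import check_authorization

# Hypothetical blueprint; Chocolate's real routes live elsewhere.
movies_bp = Blueprint("movies_example", __name__)

@movies_bp.route("/movies/<library>")
def list_movies(library):
    token = request.headers.get("Authorization")
    # Aborts with 401 (bad token, or user not in the library) or 404
    # (unknown library or user), so code below runs only when authorized.
    check_authorization(request, token, library=library)

    movies = Movies.query.filter_by(library_name=library).all()
    return jsonify([movie.title for movie in movies])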